1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
59 #include "opcode/aarch64.h"
/* Bit-field extraction helpers for decoding 32-bit instruction words.
   submask(x)       -- mask covering bits [0, x] inclusive.
   bit(obj, st)     -- the single bit ST of OBJ.
   bits(obj, st, fn)-- bits [ST, FN] of OBJ, shifted down to bit 0.
   Use an unsigned 64-bit constant so the shift is well defined for any
   field position up to bit 63 (1L << 64 would be undefined behavior,
   and shifting into the sign bit of a signed long is UB as well).  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
68 #define HA_MAX_NUM_FLDS 4
70 /* All possible aarch64 target descriptors. */
71 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1];
73 /* The standard register names, and all the valid aliases for them. */
76 const char *const name
;
78 } aarch64_register_aliases
[] =
80 /* 64-bit register names. */
81 {"fp", AARCH64_FP_REGNUM
},
82 {"lr", AARCH64_LR_REGNUM
},
83 {"sp", AARCH64_SP_REGNUM
},
85 /* 32-bit register names. */
86 {"w0", AARCH64_X0_REGNUM
+ 0},
87 {"w1", AARCH64_X0_REGNUM
+ 1},
88 {"w2", AARCH64_X0_REGNUM
+ 2},
89 {"w3", AARCH64_X0_REGNUM
+ 3},
90 {"w4", AARCH64_X0_REGNUM
+ 4},
91 {"w5", AARCH64_X0_REGNUM
+ 5},
92 {"w6", AARCH64_X0_REGNUM
+ 6},
93 {"w7", AARCH64_X0_REGNUM
+ 7},
94 {"w8", AARCH64_X0_REGNUM
+ 8},
95 {"w9", AARCH64_X0_REGNUM
+ 9},
96 {"w10", AARCH64_X0_REGNUM
+ 10},
97 {"w11", AARCH64_X0_REGNUM
+ 11},
98 {"w12", AARCH64_X0_REGNUM
+ 12},
99 {"w13", AARCH64_X0_REGNUM
+ 13},
100 {"w14", AARCH64_X0_REGNUM
+ 14},
101 {"w15", AARCH64_X0_REGNUM
+ 15},
102 {"w16", AARCH64_X0_REGNUM
+ 16},
103 {"w17", AARCH64_X0_REGNUM
+ 17},
104 {"w18", AARCH64_X0_REGNUM
+ 18},
105 {"w19", AARCH64_X0_REGNUM
+ 19},
106 {"w20", AARCH64_X0_REGNUM
+ 20},
107 {"w21", AARCH64_X0_REGNUM
+ 21},
108 {"w22", AARCH64_X0_REGNUM
+ 22},
109 {"w23", AARCH64_X0_REGNUM
+ 23},
110 {"w24", AARCH64_X0_REGNUM
+ 24},
111 {"w25", AARCH64_X0_REGNUM
+ 25},
112 {"w26", AARCH64_X0_REGNUM
+ 26},
113 {"w27", AARCH64_X0_REGNUM
+ 27},
114 {"w28", AARCH64_X0_REGNUM
+ 28},
115 {"w29", AARCH64_X0_REGNUM
+ 29},
116 {"w30", AARCH64_X0_REGNUM
+ 30},
119 {"ip0", AARCH64_X0_REGNUM
+ 16},
120 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr", /* status registers */
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
177 /* AArch64 prologue cache structure. */
178 struct aarch64_prologue_cache
180 /* The program counter at the start of the function. It is used to
181 identify this frame as a prologue frame. */
184 /* The program counter at the time this frame was created; i.e. where
185 this function was called from. It is used to identify this frame as a
189 /* The stack pointer at the time this frame was created; i.e. the
190 caller's stack pointer when this function was called. It is used
191 to identify this frame. */
194 /* Is the target available to read from? */
197 /* The frame base for this frame is just prev_sp - frame size.
198 FRAMESIZE is the distance from the frame pointer to the
199 initial stack pointer. */
202 /* The register used to hold the frame pointer for this frame. */
205 /* Saved register offsets. */
206 struct trad_frame_saved_reg
*saved_regs
;
210 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
211 struct cmd_list_element
*c
, const char *value
)
213 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
218 /* Abstract instruction reader. */
220 class abstract_instruction_reader
223 /* Read in one instruction. */
224 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
225 enum bfd_endian byte_order
) = 0;
228 /* Instruction reader from real target. */
230 class instruction_reader
: public abstract_instruction_reader
233 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
236 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
242 /* Analyze a prologue, looking for a recognizable stack frame
243 and frame pointer. Scan until we encounter a store that could
244 clobber the stack frame unexpectedly, or an unknown instruction. */
247 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
248 CORE_ADDR start
, CORE_ADDR limit
,
249 struct aarch64_prologue_cache
*cache
,
250 abstract_instruction_reader
& reader
)
252 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
254 /* Track X registers and D registers in prologue. */
255 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
257 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
258 regs
[i
] = pv_register (i
, 0);
259 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
261 for (; start
< limit
; start
+= 4)
266 insn
= reader
.read (start
, 4, byte_order_for_code
);
268 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
271 if (inst
.opcode
->iclass
== addsub_imm
272 && (inst
.opcode
->op
== OP_ADD
273 || strcmp ("sub", inst
.opcode
->name
) == 0))
275 unsigned rd
= inst
.operands
[0].reg
.regno
;
276 unsigned rn
= inst
.operands
[1].reg
.regno
;
278 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
279 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
280 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
281 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
283 if (inst
.opcode
->op
== OP_ADD
)
285 regs
[rd
] = pv_add_constant (regs
[rn
],
286 inst
.operands
[2].imm
.value
);
290 regs
[rd
] = pv_add_constant (regs
[rn
],
291 -inst
.operands
[2].imm
.value
);
294 else if (inst
.opcode
->iclass
== pcreladdr
295 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
297 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
298 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
300 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
302 else if (inst
.opcode
->iclass
== branch_imm
)
304 /* Stop analysis on branch. */
307 else if (inst
.opcode
->iclass
== condbranch
)
309 /* Stop analysis on branch. */
312 else if (inst
.opcode
->iclass
== branch_reg
)
314 /* Stop analysis on branch. */
317 else if (inst
.opcode
->iclass
== compbranch
)
319 /* Stop analysis on branch. */
322 else if (inst
.opcode
->op
== OP_MOVZ
)
324 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
325 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
327 else if (inst
.opcode
->iclass
== log_shift
328 && strcmp (inst
.opcode
->name
, "orr") == 0)
330 unsigned rd
= inst
.operands
[0].reg
.regno
;
331 unsigned rn
= inst
.operands
[1].reg
.regno
;
332 unsigned rm
= inst
.operands
[2].reg
.regno
;
334 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
335 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
336 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
338 if (inst
.operands
[2].shifter
.amount
== 0
339 && rn
== AARCH64_SP_REGNUM
)
345 debug_printf ("aarch64: prologue analysis gave up "
346 "addr=%s opcode=0x%x (orr x register)\n",
347 core_addr_to_string_nz (start
), insn
);
352 else if (inst
.opcode
->op
== OP_STUR
)
354 unsigned rt
= inst
.operands
[0].reg
.regno
;
355 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
357 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
359 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
360 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
361 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
362 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
364 stack
.store (pv_add_constant (regs
[rn
],
365 inst
.operands
[1].addr
.offset
.imm
),
366 is64
? 8 : 4, regs
[rt
]);
368 else if ((inst
.opcode
->iclass
== ldstpair_off
369 || (inst
.opcode
->iclass
== ldstpair_indexed
370 && inst
.operands
[2].addr
.preind
))
371 && strcmp ("stp", inst
.opcode
->name
) == 0)
373 /* STP with addressing mode Pre-indexed and Base register. */
376 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
377 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
379 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
380 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
381 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
382 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
383 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
384 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
386 /* If recording this store would invalidate the store area
387 (perhaps because rn is not known) then we should abandon
388 further prologue analysis. */
389 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
392 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
395 rt1
= inst
.operands
[0].reg
.regno
;
396 rt2
= inst
.operands
[1].reg
.regno
;
397 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
399 /* Only bottom 64-bit of each V register (D register) need
401 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
402 rt1
+= AARCH64_X_REGISTER_COUNT
;
403 rt2
+= AARCH64_X_REGISTER_COUNT
;
406 stack
.store (pv_add_constant (regs
[rn
], imm
), 8,
408 stack
.store (pv_add_constant (regs
[rn
], imm
+ 8), 8,
411 if (inst
.operands
[2].addr
.writeback
)
412 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
415 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
416 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
417 && (inst
.opcode
->op
== OP_STR_POS
418 || inst
.opcode
->op
== OP_STRF_POS
)))
419 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
420 && strcmp ("str", inst
.opcode
->name
) == 0)
422 /* STR (immediate) */
423 unsigned int rt
= inst
.operands
[0].reg
.regno
;
424 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
425 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
427 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
428 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
429 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
431 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
433 /* Only bottom 64-bit of each V register (D register) need
435 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
436 rt
+= AARCH64_X_REGISTER_COUNT
;
439 stack
.store (pv_add_constant (regs
[rn
], imm
),
440 is64
? 8 : 4, regs
[rt
]);
441 if (inst
.operands
[1].addr
.writeback
)
442 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
444 else if (inst
.opcode
->iclass
== testbranch
)
446 /* Stop analysis on branch. */
453 debug_printf ("aarch64: prologue analysis gave up addr=%s"
455 core_addr_to_string_nz (start
), insn
);
464 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
466 /* Frame pointer is fp. Frame size is constant. */
467 cache
->framereg
= AARCH64_FP_REGNUM
;
468 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
470 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
472 /* Try the stack pointer. */
473 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
474 cache
->framereg
= AARCH64_SP_REGNUM
;
478 /* We're just out of luck. We don't know where the frame is. */
479 cache
->framereg
= -1;
480 cache
->framesize
= 0;
483 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
487 if (stack
.find_reg (gdbarch
, i
, &offset
))
488 cache
->saved_regs
[i
].addr
= offset
;
491 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
493 int regnum
= gdbarch_num_regs (gdbarch
);
496 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
498 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
505 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
506 CORE_ADDR start
, CORE_ADDR limit
,
507 struct aarch64_prologue_cache
*cache
)
509 instruction_reader reader
;
511 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
#if GDB_SELF_TEST

namespace selftests
{

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Unit test exercising aarch64_analyze_prologue on canned instruction
   sequences.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
641 /* Implement the "skip_prologue" gdbarch method. */
644 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
646 CORE_ADDR func_addr
, limit_pc
;
648 /* See if we can determine the end of the prologue via the symbol
649 table. If so, then return either PC, or the PC after the
650 prologue, whichever is greater. */
651 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
653 CORE_ADDR post_prologue_pc
654 = skip_prologue_using_sal (gdbarch
, func_addr
);
656 if (post_prologue_pc
!= 0)
657 return std::max (pc
, post_prologue_pc
);
660 /* Can't determine prologue from the symbol table, need to examine
663 /* Find an upper limit on the function prologue using the debug
664 information. If the debug information could not be used to
665 provide that bound, then use an arbitrary large number as the
667 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
669 limit_pc
= pc
+ 128; /* Magic. */
671 /* Try disassembling prologue. */
672 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
675 /* Scan the function prologue for THIS_FRAME and populate the prologue
679 aarch64_scan_prologue (struct frame_info
*this_frame
,
680 struct aarch64_prologue_cache
*cache
)
682 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
683 CORE_ADDR prologue_start
;
684 CORE_ADDR prologue_end
;
685 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
686 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
688 cache
->prev_pc
= prev_pc
;
690 /* Assume we do not find a frame. */
691 cache
->framereg
= -1;
692 cache
->framesize
= 0;
694 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
697 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
701 /* No line info so use the current PC. */
702 prologue_end
= prev_pc
;
704 else if (sal
.end
< prologue_end
)
706 /* The next line begins after the function end. */
707 prologue_end
= sal
.end
;
710 prologue_end
= std::min (prologue_end
, prev_pc
);
711 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
717 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
721 cache
->framereg
= AARCH64_FP_REGNUM
;
722 cache
->framesize
= 16;
723 cache
->saved_regs
[29].addr
= 0;
724 cache
->saved_regs
[30].addr
= 8;
728 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
729 function may throw an exception if the inferior's registers or memory is
733 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
734 struct aarch64_prologue_cache
*cache
)
736 CORE_ADDR unwound_fp
;
739 aarch64_scan_prologue (this_frame
, cache
);
741 if (cache
->framereg
== -1)
744 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
748 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
750 /* Calculate actual addresses of saved registers using offsets
751 determined by aarch64_analyze_prologue. */
752 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
753 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
754 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
756 cache
->func
= get_frame_func (this_frame
);
758 cache
->available_p
= 1;
761 /* Allocate and fill in *THIS_CACHE with information about the prologue of
762 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
763 Return a pointer to the current aarch64_prologue_cache in
766 static struct aarch64_prologue_cache
*
767 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
769 struct aarch64_prologue_cache
*cache
;
771 if (*this_cache
!= NULL
)
772 return (struct aarch64_prologue_cache
*) *this_cache
;
774 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
775 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
780 aarch64_make_prologue_cache_1 (this_frame
, cache
);
782 CATCH (ex
, RETURN_MASK_ERROR
)
784 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
785 throw_exception (ex
);
792 /* Implement the "stop_reason" frame_unwind method. */
794 static enum unwind_stop_reason
795 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
798 struct aarch64_prologue_cache
*cache
799 = aarch64_make_prologue_cache (this_frame
, this_cache
);
801 if (!cache
->available_p
)
802 return UNWIND_UNAVAILABLE
;
804 /* Halt the backtrace at "_start". */
805 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
806 return UNWIND_OUTERMOST
;
808 /* We've hit a wall, stop. */
809 if (cache
->prev_sp
== 0)
810 return UNWIND_OUTERMOST
;
812 return UNWIND_NO_REASON
;
815 /* Our frame ID for a normal frame is the current function's starting
816 PC and the caller's SP when we were called. */
819 aarch64_prologue_this_id (struct frame_info
*this_frame
,
820 void **this_cache
, struct frame_id
*this_id
)
822 struct aarch64_prologue_cache
*cache
823 = aarch64_make_prologue_cache (this_frame
, this_cache
);
825 if (!cache
->available_p
)
826 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
828 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
831 /* Implement the "prev_register" frame_unwind method. */
833 static struct value
*
834 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
835 void **this_cache
, int prev_regnum
)
837 struct aarch64_prologue_cache
*cache
838 = aarch64_make_prologue_cache (this_frame
, this_cache
);
840 /* If we are asked to unwind the PC, then we need to return the LR
841 instead. The prologue may save PC, but it will point into this
842 frame's prologue, not the next frame's resume location. */
843 if (prev_regnum
== AARCH64_PC_REGNUM
)
847 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
848 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
851 /* SP is generally not saved to the stack, but this frame is
852 identified by the next frame's stack pointer at the time of the
853 call. The value was already reconstructed into PREV_SP. */
866 if (prev_regnum
== AARCH64_SP_REGNUM
)
867 return frame_unwind_got_constant (this_frame
, prev_regnum
,
870 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
874 /* AArch64 prologue unwinder. */
875 struct frame_unwind aarch64_prologue_unwind
=
878 aarch64_prologue_frame_unwind_stop_reason
,
879 aarch64_prologue_this_id
,
880 aarch64_prologue_prev_register
,
882 default_frame_sniffer
885 /* Allocate and fill in *THIS_CACHE with information about the prologue of
886 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
887 Return a pointer to the current aarch64_prologue_cache in
890 static struct aarch64_prologue_cache
*
891 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
893 struct aarch64_prologue_cache
*cache
;
895 if (*this_cache
!= NULL
)
896 return (struct aarch64_prologue_cache
*) *this_cache
;
898 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
899 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
904 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
906 cache
->prev_pc
= get_frame_pc (this_frame
);
907 cache
->available_p
= 1;
909 CATCH (ex
, RETURN_MASK_ERROR
)
911 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
912 throw_exception (ex
);
919 /* Implement the "stop_reason" frame_unwind method. */
921 static enum unwind_stop_reason
922 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
925 struct aarch64_prologue_cache
*cache
926 = aarch64_make_stub_cache (this_frame
, this_cache
);
928 if (!cache
->available_p
)
929 return UNWIND_UNAVAILABLE
;
931 return UNWIND_NO_REASON
;
934 /* Our frame ID for a stub frame is the current SP and LR. */
937 aarch64_stub_this_id (struct frame_info
*this_frame
,
938 void **this_cache
, struct frame_id
*this_id
)
940 struct aarch64_prologue_cache
*cache
941 = aarch64_make_stub_cache (this_frame
, this_cache
);
943 if (cache
->available_p
)
944 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
946 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
949 /* Implement the "sniffer" frame_unwind method. */
952 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
953 struct frame_info
*this_frame
,
954 void **this_prologue_cache
)
956 CORE_ADDR addr_in_block
;
959 addr_in_block
= get_frame_address_in_block (this_frame
);
960 if (in_plt_section (addr_in_block
)
961 /* We also use the stub winder if the target memory is unreadable
962 to avoid having the prologue unwinder trying to read it. */
963 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
969 /* AArch64 stub unwinder. */
970 struct frame_unwind aarch64_stub_unwind
=
973 aarch64_stub_frame_unwind_stop_reason
,
974 aarch64_stub_this_id
,
975 aarch64_prologue_prev_register
,
977 aarch64_stub_unwind_sniffer
980 /* Return the frame base address of *THIS_FRAME. */
983 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
985 struct aarch64_prologue_cache
*cache
986 = aarch64_make_prologue_cache (this_frame
, this_cache
);
988 return cache
->prev_sp
- cache
->framesize
;
991 /* AArch64 default frame base information. */
992 struct frame_base aarch64_normal_base
=
994 &aarch64_prologue_unwind
,
995 aarch64_normal_frame_base
,
996 aarch64_normal_frame_base
,
997 aarch64_normal_frame_base
1000 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1001 dummy frame. The frame ID's base needs to match the TOS value
1002 saved by save_dummy_frame_tos () and returned from
1003 aarch64_push_dummy_call, and the PC needs to match the dummy
1004 frame's breakpoint. */
1006 static struct frame_id
1007 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1009 return frame_id_build (get_frame_register_unsigned (this_frame
,
1011 get_frame_pc (this_frame
));
1014 /* Implement the "unwind_pc" gdbarch method. */
1017 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1020 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1025 /* Implement the "unwind_sp" gdbarch method. */
1028 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1030 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1033 /* Return the value of the REGNUM register in the previous frame of
1036 static struct value
*
1037 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1038 void **this_cache
, int regnum
)
1044 case AARCH64_PC_REGNUM
:
1045 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1046 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1049 internal_error (__FILE__
, __LINE__
,
1050 _("Unexpected register %d"), regnum
);
1054 /* Implement the "init_reg" dwarf2_frame_ops method. */
1057 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1058 struct dwarf2_frame_state_reg
*reg
,
1059 struct frame_info
*this_frame
)
1063 case AARCH64_PC_REGNUM
:
1064 reg
->how
= DWARF2_FRAME_REG_FN
;
1065 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1067 case AARCH64_SP_REGNUM
:
1068 reg
->how
= DWARF2_FRAME_REG_CFA
;
1073 /* When arguments must be pushed onto the stack, they go on in reverse
1074 order. The code below implements a FILO (stack) to do this. */
1078 /* Value to pass on stack. It can be NULL if this item is for stack
1080 const gdb_byte
*data
;
1082 /* Size in bytes of value to pass on stack. */
1086 DEF_VEC_O (stack_item_t
);
1088 /* Return the alignment (in bytes) of the given type. */
1091 aarch64_type_align (struct type
*t
)
1097 t
= check_typedef (t
);
1098 switch (TYPE_CODE (t
))
1101 /* Should never happen. */
1102 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1106 case TYPE_CODE_ENUM
:
1110 case TYPE_CODE_RANGE
:
1111 case TYPE_CODE_BITSTRING
:
1113 case TYPE_CODE_RVALUE_REF
:
1114 case TYPE_CODE_CHAR
:
1115 case TYPE_CODE_BOOL
:
1116 return TYPE_LENGTH (t
);
1118 case TYPE_CODE_ARRAY
:
1119 if (TYPE_VECTOR (t
))
1121 /* Use the natural alignment for vector types (the same for
1122 scalar type), but the maximum alignment is 128-bit. */
1123 if (TYPE_LENGTH (t
) > 16)
1126 return TYPE_LENGTH (t
);
1129 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1130 case TYPE_CODE_COMPLEX
:
1131 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1133 case TYPE_CODE_STRUCT
:
1134 case TYPE_CODE_UNION
:
1136 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1138 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1146 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1148 Return the number of register required, or -1 on failure.
1150 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1151 to the element, else fail if the type of this element does not match the
1155 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1156 struct type
**fundamental_type
)
1158 if (type
== nullptr)
1161 switch (TYPE_CODE (type
))
1164 if (TYPE_LENGTH (type
) > 16)
1167 if (*fundamental_type
== nullptr)
1168 *fundamental_type
= type
;
1169 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1170 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1175 case TYPE_CODE_COMPLEX
:
1177 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1178 if (TYPE_LENGTH (target_type
) > 16)
1181 if (*fundamental_type
== nullptr)
1182 *fundamental_type
= target_type
;
1183 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1184 || TYPE_CODE (target_type
) != TYPE_CODE (*fundamental_type
))
1190 case TYPE_CODE_ARRAY
:
1192 if (TYPE_VECTOR (type
))
1194 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1197 if (*fundamental_type
== nullptr)
1198 *fundamental_type
= type
;
1199 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1200 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1207 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1208 int count
= aapcs_is_vfp_call_or_return_candidate_1
1209 (target_type
, fundamental_type
);
1214 count
*= TYPE_LENGTH (type
);
1219 case TYPE_CODE_STRUCT
:
1220 case TYPE_CODE_UNION
:
1224 for (int i
= 0; i
< TYPE_NFIELDS (type
); i
++)
1226 struct type
*member
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
1228 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1229 (member
, fundamental_type
);
1230 if (sub_count
== -1)
1244 /* Return true if an argument, whose type is described by TYPE, can be passed or
1245 returned in simd/fp registers, providing enough parameter passing registers
1246 are available. This is as described in the AAPCS64.
1248 Upon successful return, *COUNT returns the number of needed registers,
1249 *FUNDAMENTAL_TYPE contains the type of those registers.
1251 Candidate as per the AAPCS64 5.4.2.C is either a:
1254 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1255 all the members are floats and has at most 4 members.
1256 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1257 all the members are short vectors and has at most 4 members.
1260 Note that HFAs and HVAs can include nested structures and arrays. */
1263 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1264 struct type
**fundamental_type
)
1266 if (type
== nullptr)
1269 *fundamental_type
= nullptr;
1271 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1274 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1283 /* AArch64 function call information structure. */
1284 struct aarch64_call_info
1286 /* the current argument number. */
1289 /* The next general purpose register number, equivalent to NGRN as
1290 described in the AArch64 Procedure Call Standard. */
1293 /* The next SIMD and floating point register number, equivalent to
1294 NSRN as described in the AArch64 Procedure Call Standard. */
1297 /* The next stacked argument address, equivalent to NSAA as
1298 described in the AArch64 Procedure Call Standard. */
1301 /* Stack item vector. */
1302 VEC(stack_item_t
) *si
;
1305 /* Pass a value in a sequence of consecutive X registers. The caller
1306 is responsbile for ensuring sufficient registers are available. */
1309 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1310 struct aarch64_call_info
*info
, struct type
*type
,
1313 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1314 int len
= TYPE_LENGTH (type
);
1315 enum type_code typecode
= TYPE_CODE (type
);
1316 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1317 const bfd_byte
*buf
= value_contents (arg
);
1323 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1324 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1328 /* Adjust sub-word struct/union args when big-endian. */
1329 if (byte_order
== BFD_ENDIAN_BIG
1330 && partial_len
< X_REGISTER_SIZE
1331 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1332 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1336 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1337 gdbarch_register_name (gdbarch
, regnum
),
1338 phex (regval
, X_REGISTER_SIZE
));
1340 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1347 /* Attempt to marshall a value in a V register. Return 1 if
1348 successful, or 0 if insufficient registers are available. This
1349 function, unlike the equivalent pass_in_x() function does not
1350 handle arguments spread across multiple registers. */
1353 pass_in_v (struct gdbarch
*gdbarch
,
1354 struct regcache
*regcache
,
1355 struct aarch64_call_info
*info
,
1356 int len
, const bfd_byte
*buf
)
1360 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1361 /* Enough space for a full vector register. */
1362 gdb_byte reg
[register_size (gdbarch
, regnum
)];
1363 gdb_assert (len
<= sizeof (reg
));
1368 memset (reg
, 0, sizeof (reg
));
1369 /* PCS C.1, the argument is allocated to the least significant
1370 bits of V register. */
1371 memcpy (reg
, buf
, len
);
1372 regcache
->cooked_write (regnum
, reg
);
1376 debug_printf ("arg %d in %s\n", info
->argnum
,
1377 gdbarch_register_name (gdbarch
, regnum
));
1385 /* Marshall an argument onto the stack. */
1388 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1391 const bfd_byte
*buf
= value_contents (arg
);
1392 int len
= TYPE_LENGTH (type
);
1398 align
= aarch64_type_align (type
);
1400 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1401 Natural alignment of the argument's type. */
1402 align
= align_up (align
, 8);
1404 /* The AArch64 PCS requires at most doubleword alignment. */
1410 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1416 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1419 if (info
->nsaa
& (align
- 1))
1421 /* Push stack alignment padding. */
1422 int pad
= align
- (info
->nsaa
& (align
- 1));
1427 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1432 /* Marshall an argument into a sequence of one or more consecutive X
1433 registers or, if insufficient X registers are available then onto
1437 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1438 struct aarch64_call_info
*info
, struct type
*type
,
1441 int len
= TYPE_LENGTH (type
);
1442 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1444 /* PCS C.13 - Pass in registers if we have enough spare */
1445 if (info
->ngrn
+ nregs
<= 8)
1447 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1448 info
->ngrn
+= nregs
;
1453 pass_on_stack (info
, type
, arg
);
1457 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1458 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1459 registers. A return value of false is an error state as the value will have
1460 been partially passed to the stack. */
1462 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1463 struct aarch64_call_info
*info
, struct type
*arg_type
,
1466 switch (TYPE_CODE (arg_type
))
1469 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1470 value_contents (arg
));
1473 case TYPE_CODE_COMPLEX
:
1475 const bfd_byte
*buf
= value_contents (arg
);
1476 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1478 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1482 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1483 buf
+ TYPE_LENGTH (target_type
));
1486 case TYPE_CODE_ARRAY
:
1487 if (TYPE_VECTOR (arg_type
))
1488 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1489 value_contents (arg
));
1492 case TYPE_CODE_STRUCT
:
1493 case TYPE_CODE_UNION
:
1494 for (int i
= 0; i
< TYPE_NFIELDS (arg_type
); i
++)
1496 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1497 struct type
*field_type
= check_typedef (value_type (field
));
1499 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1510 /* Implement the "push_dummy_call" gdbarch method. */
1513 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1514 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1516 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1517 CORE_ADDR struct_addr
)
1520 struct aarch64_call_info info
;
1521 struct type
*func_type
;
1522 struct type
*return_type
;
1523 int lang_struct_return
;
1525 memset (&info
, 0, sizeof (info
));
1527 /* We need to know what the type of the called function is in order
1528 to determine the number of named/anonymous arguments for the
1529 actual argument placement, and the return type in order to handle
1530 return value correctly.
1532 The generic code above us views the decision of return in memory
1533 or return in registers as a two stage processes. The language
1534 handler is consulted first and may decide to return in memory (eg
1535 class with copy constructor returned by value), this will cause
1536 the generic code to allocate space AND insert an initial leading
1539 If the language code does not decide to pass in memory then the
1540 target code is consulted.
1542 If the language code decides to pass in memory we want to move
1543 the pointer inserted as the initial argument from the argument
1544 list and into X8, the conventional AArch64 struct return pointer
1547 This is slightly awkward, ideally the flag "lang_struct_return"
1548 would be passed to the targets implementation of push_dummy_call.
1549 Rather that change the target interface we call the language code
1550 directly ourselves. */
1552 func_type
= check_typedef (value_type (function
));
1554 /* Dereference function pointer types. */
1555 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1556 func_type
= TYPE_TARGET_TYPE (func_type
);
1558 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1559 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1561 /* If language_pass_by_reference () returned true we will have been
1562 given an additional initial argument, a hidden pointer to the
1563 return slot in memory. */
1564 return_type
= TYPE_TARGET_TYPE (func_type
);
1565 lang_struct_return
= language_pass_by_reference (return_type
);
1567 /* Set the return address. For the AArch64, the return breakpoint
1568 is always at BP_ADDR. */
1569 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1571 /* If we were given an initial argument for the return slot because
1572 lang_struct_return was true, lose it. */
1573 if (lang_struct_return
)
1579 /* The struct_return pointer occupies X8. */
1580 if (struct_return
|| lang_struct_return
)
1584 debug_printf ("struct return in %s = 0x%s\n",
1585 gdbarch_register_name (gdbarch
,
1586 AARCH64_STRUCT_RETURN_REGNUM
),
1587 paddress (gdbarch
, struct_addr
));
1589 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1593 for (argnum
= 0; argnum
< nargs
; argnum
++)
1595 struct value
*arg
= args
[argnum
];
1596 struct type
*arg_type
, *fundamental_type
;
1599 arg_type
= check_typedef (value_type (arg
));
1600 len
= TYPE_LENGTH (arg_type
);
1602 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1603 if there are enough spare registers. */
1604 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1607 if (info
.nsrn
+ elements
<= 8)
1609 /* We know that we have sufficient registers available therefore
1610 this will never need to fallback to the stack. */
1611 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1613 gdb_assert_not_reached ("Failed to push args");
1618 pass_on_stack (&info
, arg_type
, arg
);
1623 switch (TYPE_CODE (arg_type
))
1626 case TYPE_CODE_BOOL
:
1627 case TYPE_CODE_CHAR
:
1628 case TYPE_CODE_RANGE
:
1629 case TYPE_CODE_ENUM
:
1632 /* Promote to 32 bit integer. */
1633 if (TYPE_UNSIGNED (arg_type
))
1634 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1636 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1637 arg
= value_cast (arg_type
, arg
);
1639 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1642 case TYPE_CODE_STRUCT
:
1643 case TYPE_CODE_ARRAY
:
1644 case TYPE_CODE_UNION
:
1647 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1648 invisible reference. */
1650 /* Allocate aligned storage. */
1651 sp
= align_down (sp
- len
, 16);
1653 /* Write the real data into the stack. */
1654 write_memory (sp
, value_contents (arg
), len
);
1656 /* Construct the indirection. */
1657 arg_type
= lookup_pointer_type (arg_type
);
1658 arg
= value_from_pointer (arg_type
, sp
);
1659 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1662 /* PCS C.15 / C.18 multiple values pass. */
1663 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1667 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1672 /* Make sure stack retains 16 byte alignment. */
1674 sp
-= 16 - (info
.nsaa
& 15);
1676 while (!VEC_empty (stack_item_t
, info
.si
))
1678 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1681 if (si
->data
!= NULL
)
1682 write_memory (sp
, si
->data
, si
->len
);
1683 VEC_pop (stack_item_t
, info
.si
);
1686 VEC_free (stack_item_t
, info
.si
);
1688 /* Finally, update the SP register. */
1689 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1694 /* Implement the "frame_align" gdbarch method. */
1697 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1699 /* Align the stack to sixteen bytes. */
1700 return sp
& ~(CORE_ADDR
) 15;
1703 /* Return the type for an AdvSISD Q register. */
1705 static struct type
*
1706 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1708 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1710 if (tdep
->vnq_type
== NULL
)
1715 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1718 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1719 append_composite_type_field (t
, "u", elem
);
1721 elem
= builtin_type (gdbarch
)->builtin_int128
;
1722 append_composite_type_field (t
, "s", elem
);
1727 return tdep
->vnq_type
;
1730 /* Return the type for an AdvSISD D register. */
1732 static struct type
*
1733 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1735 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1737 if (tdep
->vnd_type
== NULL
)
1742 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1745 elem
= builtin_type (gdbarch
)->builtin_double
;
1746 append_composite_type_field (t
, "f", elem
);
1748 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1749 append_composite_type_field (t
, "u", elem
);
1751 elem
= builtin_type (gdbarch
)->builtin_int64
;
1752 append_composite_type_field (t
, "s", elem
);
1757 return tdep
->vnd_type
;
1760 /* Return the type for an AdvSISD S register. */
1762 static struct type
*
1763 aarch64_vns_type (struct gdbarch
*gdbarch
)
1765 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1767 if (tdep
->vns_type
== NULL
)
1772 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1775 elem
= builtin_type (gdbarch
)->builtin_float
;
1776 append_composite_type_field (t
, "f", elem
);
1778 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1779 append_composite_type_field (t
, "u", elem
);
1781 elem
= builtin_type (gdbarch
)->builtin_int32
;
1782 append_composite_type_field (t
, "s", elem
);
1787 return tdep
->vns_type
;
1790 /* Return the type for an AdvSISD H register. */
1792 static struct type
*
1793 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1795 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1797 if (tdep
->vnh_type
== NULL
)
1802 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1805 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1806 append_composite_type_field (t
, "u", elem
);
1808 elem
= builtin_type (gdbarch
)->builtin_int16
;
1809 append_composite_type_field (t
, "s", elem
);
1814 return tdep
->vnh_type
;
1817 /* Return the type for an AdvSISD B register. */
1819 static struct type
*
1820 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1822 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1824 if (tdep
->vnb_type
== NULL
)
1829 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1832 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1833 append_composite_type_field (t
, "u", elem
);
1835 elem
= builtin_type (gdbarch
)->builtin_int8
;
1836 append_composite_type_field (t
, "s", elem
);
1841 return tdep
->vnb_type
;
1844 /* Return the type for an AdvSISD V register. */
1846 static struct type
*
1847 aarch64_vnv_type (struct gdbarch
*gdbarch
)
1849 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1851 if (tdep
->vnv_type
== NULL
)
1853 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
1856 append_composite_type_field (t
, "d", aarch64_vnd_type (gdbarch
));
1857 append_composite_type_field (t
, "s", aarch64_vns_type (gdbarch
));
1858 append_composite_type_field (t
, "h", aarch64_vnh_type (gdbarch
));
1859 append_composite_type_field (t
, "b", aarch64_vnb_type (gdbarch
));
1860 append_composite_type_field (t
, "q", aarch64_vnq_type (gdbarch
));
1865 return tdep
->vnv_type
;
1868 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1871 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1873 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1874 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1876 if (reg
== AARCH64_DWARF_SP
)
1877 return AARCH64_SP_REGNUM
;
1879 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1880 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1882 if (reg
== AARCH64_DWARF_SVE_VG
)
1883 return AARCH64_SVE_VG_REGNUM
;
1885 if (reg
== AARCH64_DWARF_SVE_FFR
)
1886 return AARCH64_SVE_FFR_REGNUM
;
1888 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
1889 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
1891 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
1892 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
1897 /* Implement the "print_insn" gdbarch method. */
1900 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1902 info
->symbols
= NULL
;
1903 return default_print_insn (memaddr
, info
);
1906 /* AArch64 BRK software debug mode instruction.
1907 Note that AArch64 code is always little-endian.
1908 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1909 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1911 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
1913 /* Extract from an array REGS containing the (raw) register state a
1914 function return value of type TYPE, and copy that, in virtual
1915 format, into VALBUF. */
1918 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1921 struct gdbarch
*gdbarch
= regs
->arch ();
1922 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1924 struct type
*fundamental_type
;
1926 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
1929 int len
= TYPE_LENGTH (fundamental_type
);
1931 for (int i
= 0; i
< elements
; i
++)
1933 int regno
= AARCH64_V0_REGNUM
+ i
;
1934 /* Enough space for a full vector register. */
1935 gdb_byte buf
[register_size (gdbarch
, regno
)];
1936 gdb_assert (len
<= sizeof (buf
));
1940 debug_printf ("read HFA or HVA return value element %d from %s\n",
1942 gdbarch_register_name (gdbarch
, regno
));
1944 regs
->cooked_read (regno
, buf
);
1946 memcpy (valbuf
, buf
, len
);
1950 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1951 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1952 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1953 || TYPE_CODE (type
) == TYPE_CODE_PTR
1954 || TYPE_IS_REFERENCE (type
)
1955 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1957 /* If the the type is a plain integer, then the access is
1958 straight-forward. Otherwise we have to play around a bit
1960 int len
= TYPE_LENGTH (type
);
1961 int regno
= AARCH64_X0_REGNUM
;
1966 /* By using store_unsigned_integer we avoid having to do
1967 anything special for small big-endian values. */
1968 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1969 store_unsigned_integer (valbuf
,
1970 (len
> X_REGISTER_SIZE
1971 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1972 len
-= X_REGISTER_SIZE
;
1973 valbuf
+= X_REGISTER_SIZE
;
1978 /* For a structure or union the behaviour is as if the value had
1979 been stored to word-aligned memory and then loaded into
1980 registers with 64-bit load instruction(s). */
1981 int len
= TYPE_LENGTH (type
);
1982 int regno
= AARCH64_X0_REGNUM
;
1983 bfd_byte buf
[X_REGISTER_SIZE
];
1987 regs
->cooked_read (regno
++, buf
);
1988 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1989 len
-= X_REGISTER_SIZE
;
1990 valbuf
+= X_REGISTER_SIZE
;
1996 /* Will a function return an aggregate type in memory or in a
1997 register? Return 0 if an aggregate type can be returned in a
1998 register, 1 if it must be returned in memory. */
2001 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
2003 type
= check_typedef (type
);
2005 struct type
*fundamental_type
;
2007 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2010 /* v0-v7 are used to return values and one register is allocated
2011 for one member. However, HFA or HVA has at most four members. */
2015 if (TYPE_LENGTH (type
) > 16)
2017 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2018 invisible reference. */
2026 /* Write into appropriate registers a function return value of type
2027 TYPE, given in virtual format. */
2030 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2031 const gdb_byte
*valbuf
)
2033 struct gdbarch
*gdbarch
= regs
->arch ();
2034 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2036 struct type
*fundamental_type
;
2038 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2041 int len
= TYPE_LENGTH (fundamental_type
);
2043 for (int i
= 0; i
< elements
; i
++)
2045 int regno
= AARCH64_V0_REGNUM
+ i
;
2046 /* Enough space for a full vector register. */
2047 gdb_byte tmpbuf
[register_size (gdbarch
, regno
)];
2048 gdb_assert (len
<= sizeof (tmpbuf
));
2052 debug_printf ("write HFA or HVA return value element %d to %s\n",
2054 gdbarch_register_name (gdbarch
, regno
));
2057 memcpy (tmpbuf
, valbuf
,
2058 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2059 regs
->cooked_write (regno
, tmpbuf
);
2063 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2064 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2065 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2066 || TYPE_CODE (type
) == TYPE_CODE_PTR
2067 || TYPE_IS_REFERENCE (type
)
2068 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2070 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2072 /* Values of one word or less are zero/sign-extended and
2074 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2075 LONGEST val
= unpack_long (type
, valbuf
);
2077 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2078 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2082 /* Integral values greater than one word are stored in
2083 consecutive registers starting with r0. This will always
2084 be a multiple of the regiser size. */
2085 int len
= TYPE_LENGTH (type
);
2086 int regno
= AARCH64_X0_REGNUM
;
2090 regs
->cooked_write (regno
++, valbuf
);
2091 len
-= X_REGISTER_SIZE
;
2092 valbuf
+= X_REGISTER_SIZE
;
2098 /* For a structure or union the behaviour is as if the value had
2099 been stored to word-aligned memory and then loaded into
2100 registers with 64-bit load instruction(s). */
2101 int len
= TYPE_LENGTH (type
);
2102 int regno
= AARCH64_X0_REGNUM
;
2103 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2107 memcpy (tmpbuf
, valbuf
,
2108 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2109 regs
->cooked_write (regno
++, tmpbuf
);
2110 len
-= X_REGISTER_SIZE
;
2111 valbuf
+= X_REGISTER_SIZE
;
2116 /* Implement the "return_value" gdbarch method. */
2118 static enum return_value_convention
2119 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2120 struct type
*valtype
, struct regcache
*regcache
,
2121 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2124 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2125 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2126 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2128 if (aarch64_return_in_memory (gdbarch
, valtype
))
2131 debug_printf ("return value in memory\n");
2132 return RETURN_VALUE_STRUCT_CONVENTION
;
2137 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2140 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2143 debug_printf ("return value in registers\n");
2145 return RETURN_VALUE_REGISTER_CONVENTION
;
2148 /* Implement the "get_longjmp_target" gdbarch method. */
2151 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2154 gdb_byte buf
[X_REGISTER_SIZE
];
2155 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2156 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2157 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2159 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2161 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2165 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2169 /* Implement the "gen_return_address" gdbarch method. */
2172 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2173 struct agent_expr
*ax
, struct axs_value
*value
,
2176 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2177 value
->kind
= axs_lvalue_register
;
2178 value
->u
.reg
= AARCH64_LR_REGNUM
;
2182 /* Return the pseudo register name corresponding to register regnum. */
2185 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2187 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2189 static const char *const q_name
[] =
2191 "q0", "q1", "q2", "q3",
2192 "q4", "q5", "q6", "q7",
2193 "q8", "q9", "q10", "q11",
2194 "q12", "q13", "q14", "q15",
2195 "q16", "q17", "q18", "q19",
2196 "q20", "q21", "q22", "q23",
2197 "q24", "q25", "q26", "q27",
2198 "q28", "q29", "q30", "q31",
2201 static const char *const d_name
[] =
2203 "d0", "d1", "d2", "d3",
2204 "d4", "d5", "d6", "d7",
2205 "d8", "d9", "d10", "d11",
2206 "d12", "d13", "d14", "d15",
2207 "d16", "d17", "d18", "d19",
2208 "d20", "d21", "d22", "d23",
2209 "d24", "d25", "d26", "d27",
2210 "d28", "d29", "d30", "d31",
2213 static const char *const s_name
[] =
2215 "s0", "s1", "s2", "s3",
2216 "s4", "s5", "s6", "s7",
2217 "s8", "s9", "s10", "s11",
2218 "s12", "s13", "s14", "s15",
2219 "s16", "s17", "s18", "s19",
2220 "s20", "s21", "s22", "s23",
2221 "s24", "s25", "s26", "s27",
2222 "s28", "s29", "s30", "s31",
2225 static const char *const h_name
[] =
2227 "h0", "h1", "h2", "h3",
2228 "h4", "h5", "h6", "h7",
2229 "h8", "h9", "h10", "h11",
2230 "h12", "h13", "h14", "h15",
2231 "h16", "h17", "h18", "h19",
2232 "h20", "h21", "h22", "h23",
2233 "h24", "h25", "h26", "h27",
2234 "h28", "h29", "h30", "h31",
2237 static const char *const b_name
[] =
2239 "b0", "b1", "b2", "b3",
2240 "b4", "b5", "b6", "b7",
2241 "b8", "b9", "b10", "b11",
2242 "b12", "b13", "b14", "b15",
2243 "b16", "b17", "b18", "b19",
2244 "b20", "b21", "b22", "b23",
2245 "b24", "b25", "b26", "b27",
2246 "b28", "b29", "b30", "b31",
2249 regnum
-= gdbarch_num_regs (gdbarch
);
2251 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2252 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2254 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2255 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2257 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2258 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2260 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2261 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2263 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2264 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2266 if (tdep
->has_sve ())
2268 static const char *const sve_v_name
[] =
2270 "v0", "v1", "v2", "v3",
2271 "v4", "v5", "v6", "v7",
2272 "v8", "v9", "v10", "v11",
2273 "v12", "v13", "v14", "v15",
2274 "v16", "v17", "v18", "v19",
2275 "v20", "v21", "v22", "v23",
2276 "v24", "v25", "v26", "v27",
2277 "v28", "v29", "v30", "v31",
2280 if (regnum
>= AARCH64_SVE_V0_REGNUM
2281 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2282 return sve_v_name
[regnum
- AARCH64_SVE_V0_REGNUM
];
2285 internal_error (__FILE__
, __LINE__
,
2286 _("aarch64_pseudo_register_name: bad register number %d"),
2290 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2292 static struct type
*
2293 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2295 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2297 regnum
-= gdbarch_num_regs (gdbarch
);
2299 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2300 return aarch64_vnq_type (gdbarch
);
2302 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2303 return aarch64_vnd_type (gdbarch
);
2305 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2306 return aarch64_vns_type (gdbarch
);
2308 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2309 return aarch64_vnh_type (gdbarch
);
2311 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2312 return aarch64_vnb_type (gdbarch
);
2314 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2315 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2316 return aarch64_vnv_type (gdbarch
);
2318 internal_error (__FILE__
, __LINE__
,
2319 _("aarch64_pseudo_register_type: bad register number %d"),
2323 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2326 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2327 struct reggroup
*group
)
2329 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2331 regnum
-= gdbarch_num_regs (gdbarch
);
2333 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2334 return group
== all_reggroup
|| group
== vector_reggroup
;
2335 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2336 return (group
== all_reggroup
|| group
== vector_reggroup
2337 || group
== float_reggroup
);
2338 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2339 return (group
== all_reggroup
|| group
== vector_reggroup
2340 || group
== float_reggroup
);
2341 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2342 return group
== all_reggroup
|| group
== vector_reggroup
;
2343 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2344 return group
== all_reggroup
|| group
== vector_reggroup
;
2345 else if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2346 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2347 return group
== all_reggroup
|| group
== vector_reggroup
;
2349 return group
== all_reggroup
;
2352 /* Helper for aarch64_pseudo_read_value. */
2354 static struct value
*
2355 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2356 readable_regcache
*regcache
, int regnum_offset
,
2357 int regsize
, struct value
*result_value
)
2359 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2361 /* Enough space for a full vector register. */
2362 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2363 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2365 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2366 mark_value_bytes_unavailable (result_value
, 0,
2367 TYPE_LENGTH (value_type (result_value
)));
2369 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2371 return result_value
;
2374 /* Implement the "pseudo_register_read_value" gdbarch method. */
2376 static struct value
*
2377 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2380 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2381 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2383 VALUE_LVAL (result_value
) = lval_register
;
2384 VALUE_REGNUM (result_value
) = regnum
;
2386 regnum
-= gdbarch_num_regs (gdbarch
);
2388 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2389 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2390 regnum
- AARCH64_Q0_REGNUM
,
2391 Q_REGISTER_SIZE
, result_value
);
2393 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2394 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2395 regnum
- AARCH64_D0_REGNUM
,
2396 D_REGISTER_SIZE
, result_value
);
2398 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2399 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2400 regnum
- AARCH64_S0_REGNUM
,
2401 S_REGISTER_SIZE
, result_value
);
2403 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2404 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2405 regnum
- AARCH64_H0_REGNUM
,
2406 H_REGISTER_SIZE
, result_value
);
2408 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2409 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2410 regnum
- AARCH64_B0_REGNUM
,
2411 B_REGISTER_SIZE
, result_value
);
2413 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2414 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2415 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2416 regnum
- AARCH64_SVE_V0_REGNUM
,
2417 V_REGISTER_SIZE
, result_value
);
2419 gdb_assert_not_reached ("regnum out of bound");
/* Helper for aarch64_pseudo_write.  Write the REGSIZE low-order bytes of
   BUF into the underlying V register selected by REGNUM_OFFSET, zero-filling
   the remainder of the vector register (matching architectural writes to the
   scalar B/H/S/D/Q views).  */

static void
aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
			int regnum_offset, int regsize, const gdb_byte *buf)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  /* Ensure the register buffer is zero, we want gdb writes of the
     various 'scalar' pseudo registers to behavior like architectural
     writes, register width bytes are written the remainder are set to
     zero.  */
  memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));

  /* Only the low REGSIZE bytes carry the pseudo-register's value.  */
  memcpy (reg_buf, buf, regsize);
  regcache->raw_write (v_regnum, reg_buf);
}
/* Implement the "pseudo_register_write" gdbarch method.

   Dispatch a write of BUF to pseudo register REGNUM to the correct slice
   of the underlying V/Z register: Qn (16 bytes), Dn (8), Sn (4), Hn (2),
   Bn (1), and — on SVE targets — the Vn pseudos.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Convert to a pseudo-register relative number.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bound");
}
2487 /* Callback function for user_reg_add. */
2489 static struct value
*
2490 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2492 const int *reg_p
= (const int *) baton
;
2494 return value_of_register (*reg_p
, frame
);
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Scans forward from the current PC: if it sits on a Load Exclusive,
   walk up to ATOMIC_SEQUENCE_LENGTH instructions looking for the
   matching Store Exclusive, and return breakpoint addresses just past
   the sequence (plus the target of at most one conditional branch seen
   inside it).  Returns an empty vector when no atomic sequence is
   found, so the caller falls back to normal stepping.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Only a single conditional branch is supported inside the
	     sequence; a second one makes us bail out.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
/* Per-step state carried from copy_insn to fixup for displaced stepping.  */

struct aarch64_displaced_step_closure : public displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  */
  int32_t pc_adjust = 0;
};
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Must come first: the visitor callbacks receive a pointer to this
     base and cast back to the full structure.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure to fill in for the later fixup phase.  */
  aarch64_displaced_step_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b".

   Relocate a B/BL: re-encode the branch relative to the scratch pad when
   the displacement still fits, otherwise emit a NOP and let the fixup
   phase adjust the PC.  For BL the link register is updated here, since
   the emitted branch is always a plain B.  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* New displacement as seen from the scratch-pad copy.  */
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced step this instruction
     differently according to the condition is true or false.  Instead
     of checking COND against conditional flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1     ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  /* Mark the closure so the fixup phase knows to inspect the PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
2671 /* Dynamically allocate a new register. If we know the register
2672 statically, we should make it a global as above instead of using this
2675 static struct aarch64_register
2676 aarch64_register (unsigned num
, int is64
)
2678 return (struct aarch64_register
) { num
, is64
};
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  /* Conditional: the fixup phase decides taken/not-taken from the PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1         ;
     TAKEN:
     INSN2

  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  /* Conditional: the fixup phase decides taken/not-taken from the PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
    regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				    address);

  /* The result was written directly; a NOP runs in the scratch pad and
     the PC is simply advanced past the original instruction.  */
  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   PC-relative loads cannot run unchanged from the scratch pad, so the
   literal's absolute address is materialised into Xt first and the load
   is rewritten as a zero-offset load through Xt.  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
/* Implementation of aarch64_insn_visitor method "others".

   Any instruction with no PC-relative semantics is copied verbatim; the
   only special case is RET (0xd65f0000 with the Rn field masked out),
   after which the PC must not be advanced.  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  if ((insn & 0xfffffc1f) == 0xd65f0000)
    {
      /* RET */
      dsd->dsc->pc_adjust = 0;
    }
  else
    dsd->dsc->pc_adjust = 4;
}
/* Visitor table used by aarch64_relocate_instruction when copying an
   instruction to the displaced-stepping scratch pad.  Order must match
   struct aarch64_insn_visitor.  */
static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Copy (a possibly rewritten form of) the instruction at FROM into the
   scratch pad at TO.  Returns a heap-allocated closure consumed later by
   aarch64_displaced_step_fixup, or NULL when the instruction cannot be
   displaced-stepped (undecodable, or the start of an atomic sequence).  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_closure> dsc
    (new aarch64_displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* Nothing was relocated; signal the caller to step the
	 instruction in place.  */
      dsc = NULL;
    }

  return dsc.release ();
}
/* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated instruction at TO has executed, repair the PC.
   For conditional instructions the taken/not-taken outcome is deduced
   from how far past TO the PC landed (8 = branch at offset 8 was taken,
   4 = fell through); then PC_ADJUST (if any) is applied relative to the
   original address FROM.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;

  if (dsc->cond)
    {
      ULONGEST pc;

      regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
    }

  if (dsc->pc_adjust != 0)
    {
      if (debug_displaced)
	{
	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
			paddress (gdbarch, from), dsc->pc_adjust);
	}
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   Always use hardware single-stepping over the scratch-pad copy.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
/* Get the correct target description for the given VQ value.
   If VQ is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).

   Descriptions are created lazily and cached in tdesc_aarch64_list, one
   per VQ value, so each is only built once.  */

const target_desc *
aarch64_read_description (uint64_t vq)
{
  if (vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_list[vq];

  if (tdesc == NULL)
    {
      tdesc = aarch64_create_target_description (vq);
      tdesc_aarch64_list[vq] = tdesc;
    }

  return tdesc;
}
/* Return the VQ used when creating the target description TDESC.
   Returns 0 when TDESC has no registers or no SVE feature (non-SVE
   target); otherwise derives VQ from the bit size of the first SVE
   register (Z0).  */

static uint64_t
aarch64_get_tdesc_vq (const struct target_desc *tdesc)
{
  const struct tdesc_feature *feature_sve;

  if (!tdesc_has_registers (tdesc))
    return 0;

  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");

  if (feature_sve == nullptr)
    return 0;

  /* Vector length in bytes of Z0, converted to quadword units.  */
  uint64_t vl = tdesc_register_bitsize (feature_sve,
					aarch64_sve_register_names[0]) / 8;
  return sve_vq_from_vl (vl);
}
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  int valid_p = 1;
  const struct tdesc_feature *feature_core;
  const struct tdesc_feature *feature_fpu;
  const struct tdesc_feature *feature_sve;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target description.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = aarch64_read_description (0);
  gdb_assert (tdesc);

  feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
  feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");

  /* The core (X registers) feature is mandatory.  */
  if (feature_core == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the description provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
					AARCH64_X0_REGNUM + i,
					aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Add the V registers.  */
  if (feature_fpu != NULL)
    {
      if (feature_sve != NULL)
	error (_("Program contains both fpu and SVE features."));

      /* Validate the description provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
					    AARCH64_V0_REGNUM + i,
					    aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
    }

  /* Add the SVE registers.  */
  if (feature_sve != NULL)
    {
      /* Validate the description provides the mandatory SVE registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
					    AARCH64_SVE_Z0_REGNUM + i,
					    aarch64_sve_register_names[i]);

      num_regs = AARCH64_SVE_Z0_REGNUM + i;
      num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
    }

  if (feature_fpu != NULL || feature_sve != NULL)
    {
      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = XCNEW (struct gdbarch_tdep);
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;
  tdep->vq = aarch64_get_tdesc_vq (tdesc);

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
/* Dump AArch64-specific per-architecture state to FILE, for
   "maintenance print architecture".  */

static void
aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
		      paddress (gdbarch, tdep->lowest_pc));
}
3194 static void aarch64_process_record_test (void);
/* Module initialization: register the AArch64 gdbarch, the debug
   maintenance command, and (in self-test builds) the unit tests.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
  selftests::record_xml_tdesc ("aarch64.xml",
			       aarch64_create_target_description (0));
#endif
}
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate and fill the recorded-register array.  REGS receives a
   heap-allocated copy of the first LENGTH entries of RECORD_BUF; no
   allocation happens when LENGTH is zero.  (Note: "&REGS[0]" restored
   from a mojibake "(R)S[0]" in the pasted source.)  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate and fill the recorded-memory array.  MEMS receives a
   heap-allocated copy of LENGTH aarch64_mem_r entries taken from
   RECORD_BUF; no allocation happens when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory write: ADDR bytes starting at LEN... i.e. a
   (length, address) pair describing a store to be replayed.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Outcome of decoding one instruction for process record.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,      /* Instruction recorded.  */
  AARCH64_RECORD_UNSUPPORTED,  /* Recognized but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN       /* Not recognized by the decoder.  */
};
/* Working state for recording a single instruction: the instruction
   itself plus the register and memory side effects collected so far.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
/* Record handler for data processing - register instructions.

   Records the destination register, plus CPSR when the instruction
   variant sets the NZCV flags.  Returns a value from enum
   aarch64_record_result.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract (shifted/extended register).  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for data processing - immediate instructions.

   Records the destination register, plus CPSR for the flag-setting
   add/subtract and logical immediate forms.  Returns a value from enum
   aarch64_record_result.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for branch, exception generation and system instructions.

   SVC is delegated to the OS-specific syscall recorder from TDEP; system
   register reads record Rt or CPSR; branches record PC (and LR for the
   linking forms).  Returns a value from enum aarch64_record_result.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* SVC: delegate to the OS ABI's syscall recorder, passing the
	     syscall number read from X8.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for advanced SIMD load and store instructions.

   Handles both the "single structure" (bit 24 set: LD1..LD4/ST1..ST4 to
   one element or replicating lane) and "multiple structure" encodings.
   Loads record the destination V registers; stores record the written
   (size, address) pairs.  Bit 22 distinguishes load (1) from store (0);
   bit 23 set means writeback, which additionally records Rn.
   Returns a value from enum aarch64_record_result.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD*R (load and replicate to all lanes).  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Writeback also modifies the base register.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3644 /* Record handler for load and store instructions. */
3647 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3649 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3650 uint8_t insn_bit23
, insn_bit21
;
3651 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3652 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3653 uint64_t datasize
, offset
;
3654 uint32_t record_buf
[8];
3655 uint64_t record_buf_mem
[8];
3658 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3659 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3660 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3661 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3662 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3663 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3664 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3665 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3666 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3667 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3668 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3670 /* Load/store exclusive. */
3671 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3674 debug_printf ("Process record: load/store exclusive\n");
3678 record_buf
[0] = reg_rt
;
3679 aarch64_insn_r
->reg_rec_count
= 1;
3682 record_buf
[1] = reg_rt2
;
3683 aarch64_insn_r
->reg_rec_count
= 2;
3689 datasize
= (8 << size_bits
) * 2;
3691 datasize
= (8 << size_bits
);
3692 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3694 record_buf_mem
[0] = datasize
/ 8;
3695 record_buf_mem
[1] = address
;
3696 aarch64_insn_r
->mem_rec_count
= 1;
3699 /* Save register rs. */
3700 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3701 aarch64_insn_r
->reg_rec_count
= 1;
3705 /* Load register (literal) instructions decoding. */
3706 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3709 debug_printf ("Process record: load register (literal)\n");
3711 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3713 record_buf
[0] = reg_rt
;
3714 aarch64_insn_r
->reg_rec_count
= 1;
3716 /* All types of load/store pair instructions decoding. */
3717 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3720 debug_printf ("Process record: load/store pair\n");
3726 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3727 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3731 record_buf
[0] = reg_rt
;
3732 record_buf
[1] = reg_rt2
;
3734 aarch64_insn_r
->reg_rec_count
= 2;
3739 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3741 size_bits
= size_bits
>> 1;
3742 datasize
= 8 << (2 + size_bits
);
3743 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3744 offset
= offset
<< (2 + size_bits
);
3745 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3747 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3749 if (imm7_off
& 0x40)
3750 address
= address
- offset
;
3752 address
= address
+ offset
;
3755 record_buf_mem
[0] = datasize
/ 8;
3756 record_buf_mem
[1] = address
;
3757 record_buf_mem
[2] = datasize
/ 8;
3758 record_buf_mem
[3] = address
+ (datasize
/ 8);
3759 aarch64_insn_r
->mem_rec_count
= 2;
3761 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3762 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3764 /* Load/store register (unsigned immediate) instructions. */
3765 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3767 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3777 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
3779 /* PRFM (immediate) */
3780 return AARCH64_RECORD_SUCCESS
;
3782 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
3784 /* LDRSW (immediate) */
3798 debug_printf ("Process record: load/store (unsigned immediate):"
3799 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3805 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3806 datasize
= 8 << size_bits
;
3807 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3809 offset
= offset
<< size_bits
;
3810 address
= address
+ offset
;
3812 record_buf_mem
[0] = datasize
>> 3;
3813 record_buf_mem
[1] = address
;
3814 aarch64_insn_r
->mem_rec_count
= 1;
3819 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3821 record_buf
[0] = reg_rt
;
3822 aarch64_insn_r
->reg_rec_count
= 1;
3825 /* Load/store register (register offset) instructions. */
3826 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3827 && insn_bits10_11
== 0x02 && insn_bit21
)
3830 debug_printf ("Process record: load/store (register offset)\n");
3831 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3838 if (size_bits
!= 0x03)
3841 return AARCH64_RECORD_UNKNOWN
;
3845 ULONGEST reg_rm_val
;
3847 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3848 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3849 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3850 offset
= reg_rm_val
<< size_bits
;
3852 offset
= reg_rm_val
;
3853 datasize
= 8 << size_bits
;
3854 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3856 address
= address
+ offset
;
3857 record_buf_mem
[0] = datasize
>> 3;
3858 record_buf_mem
[1] = address
;
3859 aarch64_insn_r
->mem_rec_count
= 1;
3864 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3866 record_buf
[0] = reg_rt
;
3867 aarch64_insn_r
->reg_rec_count
= 1;
3870 /* Load/store register (immediate and unprivileged) instructions. */
3871 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3876 debug_printf ("Process record: load/store "
3877 "(immediate and unprivileged)\n");
3879 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3886 if (size_bits
!= 0x03)
3889 return AARCH64_RECORD_UNKNOWN
;
3894 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3895 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3896 datasize
= 8 << size_bits
;
3897 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3899 if (insn_bits10_11
!= 0x01)
3901 if (imm9_off
& 0x0100)
3902 address
= address
- offset
;
3904 address
= address
+ offset
;
3906 record_buf_mem
[0] = datasize
>> 3;
3907 record_buf_mem
[1] = address
;
3908 aarch64_insn_r
->mem_rec_count
= 1;
3913 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3915 record_buf
[0] = reg_rt
;
3916 aarch64_insn_r
->reg_rec_count
= 1;
3918 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3919 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3921 /* Advanced SIMD load/store instructions. */
3923 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3925 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3927 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3929 return AARCH64_RECORD_SUCCESS
;
3932 /* Record handler for data processing SIMD and floating point instructions. */
3935 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3937 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3938 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3939 uint8_t insn_bits11_14
;
3940 uint32_t record_buf
[2];
3942 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3943 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3944 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3945 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3946 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3947 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3948 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3949 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3950 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3953 debug_printf ("Process record: data processing SIMD/FP: ");
3955 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3957 /* Floating point - fixed point conversion instructions. */
3961 debug_printf ("FP - fixed point conversion");
3963 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3964 record_buf
[0] = reg_rd
;
3966 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3968 /* Floating point - conditional compare instructions. */
3969 else if (insn_bits10_11
== 0x01)
3972 debug_printf ("FP - conditional compare");
3974 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3976 /* Floating point - data processing (2-source) and
3977 conditional select instructions. */
3978 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3981 debug_printf ("FP - DP (2-source)");
3983 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3985 else if (insn_bits10_11
== 0x00)
3987 /* Floating point - immediate instructions. */
3988 if ((insn_bits12_15
& 0x01) == 0x01
3989 || (insn_bits12_15
& 0x07) == 0x04)
3992 debug_printf ("FP - immediate");
3993 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3995 /* Floating point - compare instructions. */
3996 else if ((insn_bits12_15
& 0x03) == 0x02)
3999 debug_printf ("FP - immediate");
4000 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4002 /* Floating point - integer conversions instructions. */
4003 else if (insn_bits12_15
== 0x00)
4005 /* Convert float to integer instruction. */
4006 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4009 debug_printf ("float to int conversion");
4011 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4013 /* Convert integer to float instruction. */
4014 else if ((opcode
>> 1) == 0x01 && !rmode
)
4017 debug_printf ("int to float conversion");
4019 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4021 /* Move float to integer instruction. */
4022 else if ((opcode
>> 1) == 0x03)
4025 debug_printf ("move float to int");
4027 if (!(opcode
& 0x01))
4028 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4030 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4033 return AARCH64_RECORD_UNKNOWN
;
4036 return AARCH64_RECORD_UNKNOWN
;
4039 return AARCH64_RECORD_UNKNOWN
;
4041 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4044 debug_printf ("SIMD copy");
4046 /* Advanced SIMD copy instructions. */
4047 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4048 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4049 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4051 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4052 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4054 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4057 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4059 /* All remaining floating point or advanced SIMD instructions. */
4063 debug_printf ("all remain");
4065 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4069 debug_printf ("\n");
4071 aarch64_insn_r
->reg_rec_count
++;
4072 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
4073 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4075 return AARCH64_RECORD_SUCCESS
;
4078 /* Decodes insns type and invokes its record handler. */
4081 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4083 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4085 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4086 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4087 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4088 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4090 /* Data processing - immediate instructions. */
4091 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4092 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4094 /* Branch, exception generation and system instructions. */
4095 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4096 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4098 /* Load and store instructions. */
4099 if (!ins_bit25
&& ins_bit27
)
4100 return aarch64_record_load_store (aarch64_insn_r
);
4102 /* Data processing - register instructions. */
4103 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4104 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4106 /* Data processing - SIMD and floating point instructions. */
4107 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4108 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4110 return AARCH64_RECORD_UNSUPPORTED
;
4113 /* Cleans up local record registers and memory allocations. */
4116 deallocate_reg_mem (insn_decode_record
*record
)
4118 xfree (record
->aarch64_regs
);
4119 xfree (record
->aarch64_mems
);
4123 namespace selftests
{
4126 aarch64_process_record_test (void)
4128 struct gdbarch_info info
;
4131 gdbarch_info_init (&info
);
4132 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
4134 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
4135 SELF_CHECK (gdbarch
!= NULL
);
4137 insn_decode_record aarch64_record
;
4139 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4140 aarch64_record
.regcache
= NULL
;
4141 aarch64_record
.this_addr
= 0;
4142 aarch64_record
.gdbarch
= gdbarch
;
4144 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4145 aarch64_record
.aarch64_insn
= 0xf9800020;
4146 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4147 SELF_CHECK (ret
== AARCH64_RECORD_SUCCESS
);
4148 SELF_CHECK (aarch64_record
.reg_rec_count
== 0);
4149 SELF_CHECK (aarch64_record
.mem_rec_count
== 0);
4151 deallocate_reg_mem (&aarch64_record
);
4154 } // namespace selftests
4155 #endif /* GDB_SELF_TEST */
4157 /* Parse the current instruction and record the values of the registers and
4158 memory that will be changed in current instruction to record_arch_list
4159 return -1 if something is wrong. */
4162 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4163 CORE_ADDR insn_addr
)
4165 uint32_t rec_no
= 0;
4166 uint8_t insn_size
= 4;
4168 gdb_byte buf
[insn_size
];
4169 insn_decode_record aarch64_record
;
4171 memset (&buf
[0], 0, insn_size
);
4172 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4173 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4174 aarch64_record
.aarch64_insn
4175 = (uint32_t) extract_unsigned_integer (&buf
[0],
4177 gdbarch_byte_order (gdbarch
));
4178 aarch64_record
.regcache
= regcache
;
4179 aarch64_record
.this_addr
= insn_addr
;
4180 aarch64_record
.gdbarch
= gdbarch
;
4182 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4183 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4185 printf_unfiltered (_("Process record does not support instruction "
4186 "0x%0x at address %s.\n"),
4187 aarch64_record
.aarch64_insn
,
4188 paddress (gdbarch
, insn_addr
));
4194 /* Record registers. */
4195 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4197 /* Always record register CPSR. */
4198 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4199 AARCH64_CPSR_REGNUM
);
4200 if (aarch64_record
.aarch64_regs
)
4201 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4202 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4203 aarch64_record
.aarch64_regs
[rec_no
]))
4206 /* Record memories. */
4207 if (aarch64_record
.aarch64_mems
)
4208 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4209 if (record_full_arch_list_add_mem
4210 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4211 aarch64_record
.aarch64_mems
[rec_no
].len
))
4214 if (record_full_arch_list_add_end ())
4218 deallocate_reg_mem (&aarch64_record
);