1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bitfield extraction helpers.

   submask (x): a mask with bits [0, x] set.  Use an unsigned 64-bit
   constant: with the previous "1L" form, submask (62) and above
   left-shifted into (or past) the sign bit of a signed long, which is
   undefined behavior; 1ULL keeps the arithmetic well-defined for the
   full 0..62 range and unsigned throughout.
   bit (obj, st): bit ST of OBJ.
   bits (obj, st, fn): bits [ST, FN] (inclusive) of OBJ.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Q/D/S/H/B are the pseudo views of the
   32 V registers; each bank is 32 entries long.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
76 /* The standard register names, and all the valid aliases for them. */
79 const char *const name
;
81 } aarch64_register_aliases
[] =
83 /* 64-bit register names. */
84 {"fp", AARCH64_FP_REGNUM
},
85 {"lr", AARCH64_LR_REGNUM
},
86 {"sp", AARCH64_SP_REGNUM
},
88 /* 32-bit register names. */
89 {"w0", AARCH64_X0_REGNUM
+ 0},
90 {"w1", AARCH64_X0_REGNUM
+ 1},
91 {"w2", AARCH64_X0_REGNUM
+ 2},
92 {"w3", AARCH64_X0_REGNUM
+ 3},
93 {"w4", AARCH64_X0_REGNUM
+ 4},
94 {"w5", AARCH64_X0_REGNUM
+ 5},
95 {"w6", AARCH64_X0_REGNUM
+ 6},
96 {"w7", AARCH64_X0_REGNUM
+ 7},
97 {"w8", AARCH64_X0_REGNUM
+ 8},
98 {"w9", AARCH64_X0_REGNUM
+ 9},
99 {"w10", AARCH64_X0_REGNUM
+ 10},
100 {"w11", AARCH64_X0_REGNUM
+ 11},
101 {"w12", AARCH64_X0_REGNUM
+ 12},
102 {"w13", AARCH64_X0_REGNUM
+ 13},
103 {"w14", AARCH64_X0_REGNUM
+ 14},
104 {"w15", AARCH64_X0_REGNUM
+ 15},
105 {"w16", AARCH64_X0_REGNUM
+ 16},
106 {"w17", AARCH64_X0_REGNUM
+ 17},
107 {"w18", AARCH64_X0_REGNUM
+ 18},
108 {"w19", AARCH64_X0_REGNUM
+ 19},
109 {"w20", AARCH64_X0_REGNUM
+ 20},
110 {"w21", AARCH64_X0_REGNUM
+ 21},
111 {"w22", AARCH64_X0_REGNUM
+ 22},
112 {"w23", AARCH64_X0_REGNUM
+ 23},
113 {"w24", AARCH64_X0_REGNUM
+ 24},
114 {"w25", AARCH64_X0_REGNUM
+ 25},
115 {"w26", AARCH64_X0_REGNUM
+ 26},
116 {"w27", AARCH64_X0_REGNUM
+ 27},
117 {"w28", AARCH64_X0_REGNUM
+ 28},
118 {"w29", AARCH64_X0_REGNUM
+ 29},
119 {"w30", AARCH64_X0_REGNUM
+ 30},
122 {"ip0", AARCH64_X0_REGNUM
+ 16},
123 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
159 /* AArch64 prologue cache structure. */
160 struct aarch64_prologue_cache
162 /* The program counter at the start of the function. It is used to
163 identify this frame as a prologue frame. */
166 /* The program counter at the time this frame was created; i.e. where
167 this function was called from. It is used to identify this frame as a
171 /* The stack pointer at the time this frame was created; i.e. the
172 caller's stack pointer when this function was called. It is used
173 to identify this frame. */
176 /* Is the target available to read from? */
179 /* The frame base for this frame is just prev_sp - frame size.
180 FRAMESIZE is the distance from the frame pointer to the
181 initial stack pointer. */
184 /* The register used to hold the frame pointer for this frame. */
187 /* Saved register offsets. */
188 struct trad_frame_saved_reg
*saved_regs
;
192 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
193 struct cmd_list_element
*c
, const char *value
)
195 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
200 /* Abstract instruction reader. */
202 class abstract_instruction_reader
205 /* Read in one instruction. */
206 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
207 enum bfd_endian byte_order
) = 0;
210 /* Instruction reader from real target. */
212 class instruction_reader
: public abstract_instruction_reader
215 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
217 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
223 /* Analyze a prologue, looking for a recognizable stack frame
224 and frame pointer. Scan until we encounter a store that could
225 clobber the stack frame unexpectedly, or an unknown instruction. */
228 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
229 CORE_ADDR start
, CORE_ADDR limit
,
230 struct aarch64_prologue_cache
*cache
,
231 abstract_instruction_reader
& reader
)
233 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
235 /* Track X registers and D registers in prologue. */
236 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
238 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
239 regs
[i
] = pv_register (i
, 0);
240 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
242 for (; start
< limit
; start
+= 4)
247 insn
= reader
.read (start
, 4, byte_order_for_code
);
249 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
252 if (inst
.opcode
->iclass
== addsub_imm
253 && (inst
.opcode
->op
== OP_ADD
254 || strcmp ("sub", inst
.opcode
->name
) == 0))
256 unsigned rd
= inst
.operands
[0].reg
.regno
;
257 unsigned rn
= inst
.operands
[1].reg
.regno
;
259 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
260 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
261 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
262 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
264 if (inst
.opcode
->op
== OP_ADD
)
266 regs
[rd
] = pv_add_constant (regs
[rn
],
267 inst
.operands
[2].imm
.value
);
271 regs
[rd
] = pv_add_constant (regs
[rn
],
272 -inst
.operands
[2].imm
.value
);
275 else if (inst
.opcode
->iclass
== pcreladdr
276 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
278 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
279 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
281 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
283 else if (inst
.opcode
->iclass
== branch_imm
)
285 /* Stop analysis on branch. */
288 else if (inst
.opcode
->iclass
== condbranch
)
290 /* Stop analysis on branch. */
293 else if (inst
.opcode
->iclass
== branch_reg
)
295 /* Stop analysis on branch. */
298 else if (inst
.opcode
->iclass
== compbranch
)
300 /* Stop analysis on branch. */
303 else if (inst
.opcode
->op
== OP_MOVZ
)
305 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
306 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
308 else if (inst
.opcode
->iclass
== log_shift
309 && strcmp (inst
.opcode
->name
, "orr") == 0)
311 unsigned rd
= inst
.operands
[0].reg
.regno
;
312 unsigned rn
= inst
.operands
[1].reg
.regno
;
313 unsigned rm
= inst
.operands
[2].reg
.regno
;
315 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
316 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
317 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
319 if (inst
.operands
[2].shifter
.amount
== 0
320 && rn
== AARCH64_SP_REGNUM
)
326 debug_printf ("aarch64: prologue analysis gave up "
327 "addr=%s opcode=0x%x (orr x register)\n",
328 core_addr_to_string_nz (start
), insn
);
333 else if (inst
.opcode
->op
== OP_STUR
)
335 unsigned rt
= inst
.operands
[0].reg
.regno
;
336 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
338 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
340 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
341 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
342 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
343 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
345 stack
.store (pv_add_constant (regs
[rn
],
346 inst
.operands
[1].addr
.offset
.imm
),
347 is64
? 8 : 4, regs
[rt
]);
349 else if ((inst
.opcode
->iclass
== ldstpair_off
350 || (inst
.opcode
->iclass
== ldstpair_indexed
351 && inst
.operands
[2].addr
.preind
))
352 && strcmp ("stp", inst
.opcode
->name
) == 0)
354 /* STP with addressing mode Pre-indexed and Base register. */
357 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
358 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
360 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
361 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
362 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
363 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
364 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
365 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
367 /* If recording this store would invalidate the store area
368 (perhaps because rn is not known) then we should abandon
369 further prologue analysis. */
370 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
373 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
376 rt1
= inst
.operands
[0].reg
.regno
;
377 rt2
= inst
.operands
[1].reg
.regno
;
378 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
380 /* Only bottom 64-bit of each V register (D register) need
382 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
383 rt1
+= AARCH64_X_REGISTER_COUNT
;
384 rt2
+= AARCH64_X_REGISTER_COUNT
;
387 stack
.store (pv_add_constant (regs
[rn
], imm
), 8,
389 stack
.store (pv_add_constant (regs
[rn
], imm
+ 8), 8,
392 if (inst
.operands
[2].addr
.writeback
)
393 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
396 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
397 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
398 && (inst
.opcode
->op
== OP_STR_POS
399 || inst
.opcode
->op
== OP_STRF_POS
)))
400 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
401 && strcmp ("str", inst
.opcode
->name
) == 0)
403 /* STR (immediate) */
404 unsigned int rt
= inst
.operands
[0].reg
.regno
;
405 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
406 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
408 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
409 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
410 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
412 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
414 /* Only bottom 64-bit of each V register (D register) need
416 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
417 rt
+= AARCH64_X_REGISTER_COUNT
;
420 stack
.store (pv_add_constant (regs
[rn
], imm
),
421 is64
? 8 : 4, regs
[rt
]);
422 if (inst
.operands
[1].addr
.writeback
)
423 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
425 else if (inst
.opcode
->iclass
== testbranch
)
427 /* Stop analysis on branch. */
434 debug_printf ("aarch64: prologue analysis gave up addr=%s"
436 core_addr_to_string_nz (start
), insn
);
445 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
447 /* Frame pointer is fp. Frame size is constant. */
448 cache
->framereg
= AARCH64_FP_REGNUM
;
449 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
451 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
453 /* Try the stack pointer. */
454 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
455 cache
->framereg
= AARCH64_SP_REGNUM
;
459 /* We're just out of luck. We don't know where the frame is. */
460 cache
->framereg
= -1;
461 cache
->framesize
= 0;
464 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
468 if (stack
.find_reg (gdbarch
, i
, &offset
))
469 cache
->saved_regs
[i
].addr
= offset
;
472 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
474 int regnum
= gdbarch_num_regs (gdbarch
);
477 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
479 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
486 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
487 CORE_ADDR start
, CORE_ADDR limit
,
488 struct aarch64_prologue_cache
*cache
)
490 instruction_reader reader
;
492 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
621 /* Implement the "skip_prologue" gdbarch method. */
624 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
626 CORE_ADDR func_addr
, limit_pc
;
628 /* See if we can determine the end of the prologue via the symbol
629 table. If so, then return either PC, or the PC after the
630 prologue, whichever is greater. */
631 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
633 CORE_ADDR post_prologue_pc
634 = skip_prologue_using_sal (gdbarch
, func_addr
);
636 if (post_prologue_pc
!= 0)
637 return std::max (pc
, post_prologue_pc
);
640 /* Can't determine prologue from the symbol table, need to examine
643 /* Find an upper limit on the function prologue using the debug
644 information. If the debug information could not be used to
645 provide that bound, then use an arbitrary large number as the
647 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
649 limit_pc
= pc
+ 128; /* Magic. */
651 /* Try disassembling prologue. */
652 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
655 /* Scan the function prologue for THIS_FRAME and populate the prologue
659 aarch64_scan_prologue (struct frame_info
*this_frame
,
660 struct aarch64_prologue_cache
*cache
)
662 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
663 CORE_ADDR prologue_start
;
664 CORE_ADDR prologue_end
;
665 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
666 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
668 cache
->prev_pc
= prev_pc
;
670 /* Assume we do not find a frame. */
671 cache
->framereg
= -1;
672 cache
->framesize
= 0;
674 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
677 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
681 /* No line info so use the current PC. */
682 prologue_end
= prev_pc
;
684 else if (sal
.end
< prologue_end
)
686 /* The next line begins after the function end. */
687 prologue_end
= sal
.end
;
690 prologue_end
= std::min (prologue_end
, prev_pc
);
691 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
697 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
701 cache
->framereg
= AARCH64_FP_REGNUM
;
702 cache
->framesize
= 16;
703 cache
->saved_regs
[29].addr
= 0;
704 cache
->saved_regs
[30].addr
= 8;
708 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
709 function may throw an exception if the inferior's registers or memory is
713 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
714 struct aarch64_prologue_cache
*cache
)
716 CORE_ADDR unwound_fp
;
719 aarch64_scan_prologue (this_frame
, cache
);
721 if (cache
->framereg
== -1)
724 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
728 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
730 /* Calculate actual addresses of saved registers using offsets
731 determined by aarch64_analyze_prologue. */
732 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
733 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
734 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
736 cache
->func
= get_frame_func (this_frame
);
738 cache
->available_p
= 1;
741 /* Allocate and fill in *THIS_CACHE with information about the prologue of
742 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
743 Return a pointer to the current aarch64_prologue_cache in
746 static struct aarch64_prologue_cache
*
747 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
749 struct aarch64_prologue_cache
*cache
;
751 if (*this_cache
!= NULL
)
752 return (struct aarch64_prologue_cache
*) *this_cache
;
754 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
755 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
760 aarch64_make_prologue_cache_1 (this_frame
, cache
);
762 CATCH (ex
, RETURN_MASK_ERROR
)
764 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
765 throw_exception (ex
);
772 /* Implement the "stop_reason" frame_unwind method. */
774 static enum unwind_stop_reason
775 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
778 struct aarch64_prologue_cache
*cache
779 = aarch64_make_prologue_cache (this_frame
, this_cache
);
781 if (!cache
->available_p
)
782 return UNWIND_UNAVAILABLE
;
784 /* Halt the backtrace at "_start". */
785 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
786 return UNWIND_OUTERMOST
;
788 /* We've hit a wall, stop. */
789 if (cache
->prev_sp
== 0)
790 return UNWIND_OUTERMOST
;
792 return UNWIND_NO_REASON
;
795 /* Our frame ID for a normal frame is the current function's starting
796 PC and the caller's SP when we were called. */
799 aarch64_prologue_this_id (struct frame_info
*this_frame
,
800 void **this_cache
, struct frame_id
*this_id
)
802 struct aarch64_prologue_cache
*cache
803 = aarch64_make_prologue_cache (this_frame
, this_cache
);
805 if (!cache
->available_p
)
806 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
808 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
811 /* Implement the "prev_register" frame_unwind method. */
813 static struct value
*
814 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
815 void **this_cache
, int prev_regnum
)
817 struct aarch64_prologue_cache
*cache
818 = aarch64_make_prologue_cache (this_frame
, this_cache
);
820 /* If we are asked to unwind the PC, then we need to return the LR
821 instead. The prologue may save PC, but it will point into this
822 frame's prologue, not the next frame's resume location. */
823 if (prev_regnum
== AARCH64_PC_REGNUM
)
827 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
828 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
831 /* SP is generally not saved to the stack, but this frame is
832 identified by the next frame's stack pointer at the time of the
833 call. The value was already reconstructed into PREV_SP. */
846 if (prev_regnum
== AARCH64_SP_REGNUM
)
847 return frame_unwind_got_constant (this_frame
, prev_regnum
,
850 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
854 /* AArch64 prologue unwinder. */
855 struct frame_unwind aarch64_prologue_unwind
=
858 aarch64_prologue_frame_unwind_stop_reason
,
859 aarch64_prologue_this_id
,
860 aarch64_prologue_prev_register
,
862 default_frame_sniffer
865 /* Allocate and fill in *THIS_CACHE with information about the prologue of
866 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
867 Return a pointer to the current aarch64_prologue_cache in
870 static struct aarch64_prologue_cache
*
871 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
873 struct aarch64_prologue_cache
*cache
;
875 if (*this_cache
!= NULL
)
876 return (struct aarch64_prologue_cache
*) *this_cache
;
878 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
879 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
884 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
886 cache
->prev_pc
= get_frame_pc (this_frame
);
887 cache
->available_p
= 1;
889 CATCH (ex
, RETURN_MASK_ERROR
)
891 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
892 throw_exception (ex
);
899 /* Implement the "stop_reason" frame_unwind method. */
901 static enum unwind_stop_reason
902 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
905 struct aarch64_prologue_cache
*cache
906 = aarch64_make_stub_cache (this_frame
, this_cache
);
908 if (!cache
->available_p
)
909 return UNWIND_UNAVAILABLE
;
911 return UNWIND_NO_REASON
;
914 /* Our frame ID for a stub frame is the current SP and LR. */
917 aarch64_stub_this_id (struct frame_info
*this_frame
,
918 void **this_cache
, struct frame_id
*this_id
)
920 struct aarch64_prologue_cache
*cache
921 = aarch64_make_stub_cache (this_frame
, this_cache
);
923 if (cache
->available_p
)
924 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
926 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
929 /* Implement the "sniffer" frame_unwind method. */
932 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
933 struct frame_info
*this_frame
,
934 void **this_prologue_cache
)
936 CORE_ADDR addr_in_block
;
939 addr_in_block
= get_frame_address_in_block (this_frame
);
940 if (in_plt_section (addr_in_block
)
941 /* We also use the stub winder if the target memory is unreadable
942 to avoid having the prologue unwinder trying to read it. */
943 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
949 /* AArch64 stub unwinder. */
950 struct frame_unwind aarch64_stub_unwind
=
953 aarch64_stub_frame_unwind_stop_reason
,
954 aarch64_stub_this_id
,
955 aarch64_prologue_prev_register
,
957 aarch64_stub_unwind_sniffer
960 /* Return the frame base address of *THIS_FRAME. */
963 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
965 struct aarch64_prologue_cache
*cache
966 = aarch64_make_prologue_cache (this_frame
, this_cache
);
968 return cache
->prev_sp
- cache
->framesize
;
971 /* AArch64 default frame base information. */
972 struct frame_base aarch64_normal_base
=
974 &aarch64_prologue_unwind
,
975 aarch64_normal_frame_base
,
976 aarch64_normal_frame_base
,
977 aarch64_normal_frame_base
980 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
981 dummy frame. The frame ID's base needs to match the TOS value
982 saved by save_dummy_frame_tos () and returned from
983 aarch64_push_dummy_call, and the PC needs to match the dummy
984 frame's breakpoint. */
986 static struct frame_id
987 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
989 return frame_id_build (get_frame_register_unsigned (this_frame
,
991 get_frame_pc (this_frame
));
994 /* Implement the "unwind_pc" gdbarch method. */
997 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1000 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1005 /* Implement the "unwind_sp" gdbarch method. */
1008 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1010 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1013 /* Return the value of the REGNUM register in the previous frame of
1016 static struct value
*
1017 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1018 void **this_cache
, int regnum
)
1024 case AARCH64_PC_REGNUM
:
1025 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1026 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1029 internal_error (__FILE__
, __LINE__
,
1030 _("Unexpected register %d"), regnum
);
1034 /* Implement the "init_reg" dwarf2_frame_ops method. */
1037 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1038 struct dwarf2_frame_state_reg
*reg
,
1039 struct frame_info
*this_frame
)
1043 case AARCH64_PC_REGNUM
:
1044 reg
->how
= DWARF2_FRAME_REG_FN
;
1045 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1047 case AARCH64_SP_REGNUM
:
1048 reg
->how
= DWARF2_FRAME_REG_CFA
;
1053 /* When arguments must be pushed onto the stack, they go on in reverse
1054 order. The code below implements a FILO (stack) to do this. */
1058 /* Value to pass on stack. It can be NULL if this item is for stack
1060 const gdb_byte
*data
;
1062 /* Size in bytes of value to pass on stack. */
1066 DEF_VEC_O (stack_item_t
);
1068 /* Return the alignment (in bytes) of the given type. */
1071 aarch64_type_align (struct type
*t
)
1077 t
= check_typedef (t
);
1078 switch (TYPE_CODE (t
))
1081 /* Should never happen. */
1082 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1086 case TYPE_CODE_ENUM
:
1090 case TYPE_CODE_RANGE
:
1091 case TYPE_CODE_BITSTRING
:
1093 case TYPE_CODE_RVALUE_REF
:
1094 case TYPE_CODE_CHAR
:
1095 case TYPE_CODE_BOOL
:
1096 return TYPE_LENGTH (t
);
1098 case TYPE_CODE_ARRAY
:
1099 if (TYPE_VECTOR (t
))
1101 /* Use the natural alignment for vector types (the same for
1102 scalar type), but the maximum alignment is 128-bit. */
1103 if (TYPE_LENGTH (t
) > 16)
1106 return TYPE_LENGTH (t
);
1109 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1110 case TYPE_CODE_COMPLEX
:
1111 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1113 case TYPE_CODE_STRUCT
:
1114 case TYPE_CODE_UNION
:
1116 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1118 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1126 /* Return 1 if *TY is a homogeneous floating-point aggregate or
1127 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
1128 document; otherwise return 0. */
1131 is_hfa_or_hva (struct type
*ty
)
1133 switch (TYPE_CODE (ty
))
1135 case TYPE_CODE_ARRAY
:
1137 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1139 if (TYPE_VECTOR (ty
))
1142 if (TYPE_LENGTH (ty
) <= 4 /* HFA or HVA has at most 4 members. */
1143 && (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
/* HFA */
1144 || (TYPE_CODE (target_ty
) == TYPE_CODE_ARRAY
/* HVA */
1145 && TYPE_VECTOR (target_ty
))))
1150 case TYPE_CODE_UNION
:
1151 case TYPE_CODE_STRUCT
:
1153 /* HFA or HVA has at most four members. */
1154 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1156 struct type
*member0_type
;
1158 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1159 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
1160 || (TYPE_CODE (member0_type
) == TYPE_CODE_ARRAY
1161 && TYPE_VECTOR (member0_type
)))
1165 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1167 struct type
*member1_type
;
1169 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1170 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1171 || (TYPE_LENGTH (member0_type
)
1172 != TYPE_LENGTH (member1_type
)))
1188 /* AArch64 function call information structure. */
1189 struct aarch64_call_info
1191 /* the current argument number. */
1194 /* The next general purpose register number, equivalent to NGRN as
1195 described in the AArch64 Procedure Call Standard. */
1198 /* The next SIMD and floating point register number, equivalent to
1199 NSRN as described in the AArch64 Procedure Call Standard. */
1202 /* The next stacked argument address, equivalent to NSAA as
1203 described in the AArch64 Procedure Call Standard. */
1206 /* Stack item vector. */
1207 VEC(stack_item_t
) *si
;
1210 /* Pass a value in a sequence of consecutive X registers. The caller
1211 is responsbile for ensuring sufficient registers are available. */
1214 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1215 struct aarch64_call_info
*info
, struct type
*type
,
1218 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1219 int len
= TYPE_LENGTH (type
);
1220 enum type_code typecode
= TYPE_CODE (type
);
1221 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1222 const bfd_byte
*buf
= value_contents (arg
);
1228 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1229 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1233 /* Adjust sub-word struct/union args when big-endian. */
1234 if (byte_order
== BFD_ENDIAN_BIG
1235 && partial_len
< X_REGISTER_SIZE
1236 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1237 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1241 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1242 gdbarch_register_name (gdbarch
, regnum
),
1243 phex (regval
, X_REGISTER_SIZE
));
1245 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1252 /* Attempt to marshall a value in a V register. Return 1 if
1253 successful, or 0 if insufficient registers are available. This
1254 function, unlike the equivalent pass_in_x() function does not
1255 handle arguments spread across multiple registers. */
1258 pass_in_v (struct gdbarch
*gdbarch
,
1259 struct regcache
*regcache
,
1260 struct aarch64_call_info
*info
,
1261 int len
, const bfd_byte
*buf
)
1265 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1266 gdb_byte reg
[V_REGISTER_SIZE
];
1271 memset (reg
, 0, sizeof (reg
));
1272 /* PCS C.1, the argument is allocated to the least significant
1273 bits of V register. */
1274 memcpy (reg
, buf
, len
);
1275 regcache_cooked_write (regcache
, regnum
, reg
);
1279 debug_printf ("arg %d in %s\n", info
->argnum
,
1280 gdbarch_register_name (gdbarch
, regnum
));
1288 /* Marshall an argument onto the stack. */
1291 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1294 const bfd_byte
*buf
= value_contents (arg
);
1295 int len
= TYPE_LENGTH (type
);
1301 align
= aarch64_type_align (type
);
1303 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1304 Natural alignment of the argument's type. */
1305 align
= align_up (align
, 8);
1307 /* The AArch64 PCS requires at most doubleword alignment. */
1313 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1319 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1322 if (info
->nsaa
& (align
- 1))
1324 /* Push stack alignment padding. */
1325 int pad
= align
- (info
->nsaa
& (align
- 1));
1330 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1335 /* Marshall an argument into a sequence of one or more consecutive X
1336 registers or, if insufficient X registers are available then onto
1340 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1341 struct aarch64_call_info
*info
, struct type
*type
,
1344 int len
= TYPE_LENGTH (type
);
1345 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1347 /* PCS C.13 - Pass in registers if we have enough spare */
1348 if (info
->ngrn
+ nregs
<= 8)
1350 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1351 info
->ngrn
+= nregs
;
1356 pass_on_stack (info
, type
, arg
);
1360 /* Pass a value in a V register, or on the stack if insufficient are
1364 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1365 struct regcache
*regcache
,
1366 struct aarch64_call_info
*info
,
1370 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (type
),
1371 value_contents (arg
)))
1372 pass_on_stack (info
, type
, arg
);
1375 /* Implement the "push_dummy_call" gdbarch method. */
1378 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1379 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1381 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1382 CORE_ADDR struct_addr
)
1385 struct aarch64_call_info info
;
1386 struct type
*func_type
;
1387 struct type
*return_type
;
1388 int lang_struct_return
;
1390 memset (&info
, 0, sizeof (info
));
1392 /* We need to know what the type of the called function is in order
1393 to determine the number of named/anonymous arguments for the
1394 actual argument placement, and the return type in order to handle
1395 return value correctly.
1397 The generic code above us views the decision of return in memory
1398 or return in registers as a two stage processes. The language
1399 handler is consulted first and may decide to return in memory (eg
1400 class with copy constructor returned by value), this will cause
1401 the generic code to allocate space AND insert an initial leading
1404 If the language code does not decide to pass in memory then the
1405 target code is consulted.
1407 If the language code decides to pass in memory we want to move
1408 the pointer inserted as the initial argument from the argument
1409 list and into X8, the conventional AArch64 struct return pointer
1412 This is slightly awkward, ideally the flag "lang_struct_return"
1413 would be passed to the targets implementation of push_dummy_call.
1414 Rather that change the target interface we call the language code
1415 directly ourselves. */
1417 func_type
= check_typedef (value_type (function
));
1419 /* Dereference function pointer types. */
1420 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1421 func_type
= TYPE_TARGET_TYPE (func_type
);
1423 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1424 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1426 /* If language_pass_by_reference () returned true we will have been
1427 given an additional initial argument, a hidden pointer to the
1428 return slot in memory. */
1429 return_type
= TYPE_TARGET_TYPE (func_type
);
1430 lang_struct_return
= language_pass_by_reference (return_type
);
1432 /* Set the return address. For the AArch64, the return breakpoint
1433 is always at BP_ADDR. */
1434 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1436 /* If we were given an initial argument for the return slot because
1437 lang_struct_return was true, lose it. */
1438 if (lang_struct_return
)
1444 /* The struct_return pointer occupies X8. */
1445 if (struct_return
|| lang_struct_return
)
1449 debug_printf ("struct return in %s = 0x%s\n",
1450 gdbarch_register_name (gdbarch
,
1451 AARCH64_STRUCT_RETURN_REGNUM
),
1452 paddress (gdbarch
, struct_addr
));
1454 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1458 for (argnum
= 0; argnum
< nargs
; argnum
++)
1460 struct value
*arg
= args
[argnum
];
1461 struct type
*arg_type
;
1464 arg_type
= check_typedef (value_type (arg
));
1465 len
= TYPE_LENGTH (arg_type
);
1467 switch (TYPE_CODE (arg_type
))
1470 case TYPE_CODE_BOOL
:
1471 case TYPE_CODE_CHAR
:
1472 case TYPE_CODE_RANGE
:
1473 case TYPE_CODE_ENUM
:
1476 /* Promote to 32 bit integer. */
1477 if (TYPE_UNSIGNED (arg_type
))
1478 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1480 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1481 arg
= value_cast (arg_type
, arg
);
1483 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1486 case TYPE_CODE_COMPLEX
:
1489 const bfd_byte
*buf
= value_contents (arg
);
1490 struct type
*target_type
=
1491 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1493 pass_in_v (gdbarch
, regcache
, &info
,
1494 TYPE_LENGTH (target_type
), buf
);
1495 pass_in_v (gdbarch
, regcache
, &info
,
1496 TYPE_LENGTH (target_type
),
1497 buf
+ TYPE_LENGTH (target_type
));
1502 pass_on_stack (&info
, arg_type
, arg
);
1506 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1509 case TYPE_CODE_STRUCT
:
1510 case TYPE_CODE_ARRAY
:
1511 case TYPE_CODE_UNION
:
1512 if (is_hfa_or_hva (arg_type
))
1514 int elements
= TYPE_NFIELDS (arg_type
);
1516 /* Homogeneous Aggregates */
1517 if (info
.nsrn
+ elements
< 8)
1521 for (i
= 0; i
< elements
; i
++)
1523 /* We know that we have sufficient registers
1524 available therefore this will never fallback
1526 struct value
*field
=
1527 value_primitive_field (arg
, 0, i
, arg_type
);
1528 struct type
*field_type
=
1529 check_typedef (value_type (field
));
1531 pass_in_v_or_stack (gdbarch
, regcache
, &info
,
1538 pass_on_stack (&info
, arg_type
, arg
);
1541 else if (TYPE_CODE (arg_type
) == TYPE_CODE_ARRAY
1542 && TYPE_VECTOR (arg_type
) && (len
== 16 || len
== 8))
1544 /* Short vector types are passed in V registers. */
1545 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1549 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1550 invisible reference. */
1552 /* Allocate aligned storage. */
1553 sp
= align_down (sp
- len
, 16);
1555 /* Write the real data into the stack. */
1556 write_memory (sp
, value_contents (arg
), len
);
1558 /* Construct the indirection. */
1559 arg_type
= lookup_pointer_type (arg_type
);
1560 arg
= value_from_pointer (arg_type
, sp
);
1561 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1564 /* PCS C.15 / C.18 multiple values pass. */
1565 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1569 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1574 /* Make sure stack retains 16 byte alignment. */
1576 sp
-= 16 - (info
.nsaa
& 15);
1578 while (!VEC_empty (stack_item_t
, info
.si
))
1580 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1583 if (si
->data
!= NULL
)
1584 write_memory (sp
, si
->data
, si
->len
);
1585 VEC_pop (stack_item_t
, info
.si
);
1588 VEC_free (stack_item_t
, info
.si
);
1590 /* Finally, update the SP register. */
1591 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1596 /* Implement the "frame_align" gdbarch method. */
1599 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1601 /* Align the stack to sixteen bytes. */
1602 return sp
& ~(CORE_ADDR
) 15;
1605 /* Return the type for an AdvSISD Q register. */
1607 static struct type
*
1608 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1610 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1612 if (tdep
->vnq_type
== NULL
)
1617 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1620 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1621 append_composite_type_field (t
, "u", elem
);
1623 elem
= builtin_type (gdbarch
)->builtin_int128
;
1624 append_composite_type_field (t
, "s", elem
);
1629 return tdep
->vnq_type
;
1632 /* Return the type for an AdvSISD D register. */
1634 static struct type
*
1635 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1637 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1639 if (tdep
->vnd_type
== NULL
)
1644 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1647 elem
= builtin_type (gdbarch
)->builtin_double
;
1648 append_composite_type_field (t
, "f", elem
);
1650 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1651 append_composite_type_field (t
, "u", elem
);
1653 elem
= builtin_type (gdbarch
)->builtin_int64
;
1654 append_composite_type_field (t
, "s", elem
);
1659 return tdep
->vnd_type
;
1662 /* Return the type for an AdvSISD S register. */
1664 static struct type
*
1665 aarch64_vns_type (struct gdbarch
*gdbarch
)
1667 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1669 if (tdep
->vns_type
== NULL
)
1674 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1677 elem
= builtin_type (gdbarch
)->builtin_float
;
1678 append_composite_type_field (t
, "f", elem
);
1680 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1681 append_composite_type_field (t
, "u", elem
);
1683 elem
= builtin_type (gdbarch
)->builtin_int32
;
1684 append_composite_type_field (t
, "s", elem
);
1689 return tdep
->vns_type
;
1692 /* Return the type for an AdvSISD H register. */
1694 static struct type
*
1695 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1697 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1699 if (tdep
->vnh_type
== NULL
)
1704 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1707 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1708 append_composite_type_field (t
, "u", elem
);
1710 elem
= builtin_type (gdbarch
)->builtin_int16
;
1711 append_composite_type_field (t
, "s", elem
);
1716 return tdep
->vnh_type
;
1719 /* Return the type for an AdvSISD B register. */
1721 static struct type
*
1722 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1724 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1726 if (tdep
->vnb_type
== NULL
)
1731 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1734 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1735 append_composite_type_field (t
, "u", elem
);
1737 elem
= builtin_type (gdbarch
)->builtin_int8
;
1738 append_composite_type_field (t
, "s", elem
);
1743 return tdep
->vnb_type
;
1746 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1749 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1751 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1752 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1754 if (reg
== AARCH64_DWARF_SP
)
1755 return AARCH64_SP_REGNUM
;
1757 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1758 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1764 /* Implement the "print_insn" gdbarch method. */
1767 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1769 info
->symbols
= NULL
;
1770 return default_print_insn (memaddr
, info
);
1773 /* AArch64 BRK software debug mode instruction.
1774 Note that AArch64 code is always little-endian.
1775 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1776 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1778 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
1780 /* Extract from an array REGS containing the (raw) register state a
1781 function return value of type TYPE, and copy that, in virtual
1782 format, into VALBUF. */
1785 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1788 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1789 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1791 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1793 bfd_byte buf
[V_REGISTER_SIZE
];
1794 int len
= TYPE_LENGTH (type
);
1796 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1797 memcpy (valbuf
, buf
, len
);
1799 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1800 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1801 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1802 || TYPE_CODE (type
) == TYPE_CODE_PTR
1803 || TYPE_IS_REFERENCE (type
)
1804 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1806 /* If the the type is a plain integer, then the access is
1807 straight-forward. Otherwise we have to play around a bit
1809 int len
= TYPE_LENGTH (type
);
1810 int regno
= AARCH64_X0_REGNUM
;
1815 /* By using store_unsigned_integer we avoid having to do
1816 anything special for small big-endian values. */
1817 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1818 store_unsigned_integer (valbuf
,
1819 (len
> X_REGISTER_SIZE
1820 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1821 len
-= X_REGISTER_SIZE
;
1822 valbuf
+= X_REGISTER_SIZE
;
1825 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
1827 int regno
= AARCH64_V0_REGNUM
;
1828 bfd_byte buf
[V_REGISTER_SIZE
];
1829 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1830 int len
= TYPE_LENGTH (target_type
);
1832 regcache_cooked_read (regs
, regno
, buf
);
1833 memcpy (valbuf
, buf
, len
);
1835 regcache_cooked_read (regs
, regno
+ 1, buf
);
1836 memcpy (valbuf
, buf
, len
);
1839 else if (is_hfa_or_hva (type
))
1841 int elements
= TYPE_NFIELDS (type
);
1842 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1843 int len
= TYPE_LENGTH (member_type
);
1846 for (i
= 0; i
< elements
; i
++)
1848 int regno
= AARCH64_V0_REGNUM
+ i
;
1849 bfd_byte buf
[V_REGISTER_SIZE
];
1853 debug_printf ("read HFA or HVA return value element %d from %s\n",
1855 gdbarch_register_name (gdbarch
, regno
));
1857 regcache_cooked_read (regs
, regno
, buf
);
1859 memcpy (valbuf
, buf
, len
);
1863 else if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (type
)
1864 && (TYPE_LENGTH (type
) == 16 || TYPE_LENGTH (type
) == 8))
1866 /* Short vector is returned in V register. */
1867 gdb_byte buf
[V_REGISTER_SIZE
];
1869 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1870 memcpy (valbuf
, buf
, TYPE_LENGTH (type
));
1874 /* For a structure or union the behaviour is as if the value had
1875 been stored to word-aligned memory and then loaded into
1876 registers with 64-bit load instruction(s). */
1877 int len
= TYPE_LENGTH (type
);
1878 int regno
= AARCH64_X0_REGNUM
;
1879 bfd_byte buf
[X_REGISTER_SIZE
];
1883 regcache_cooked_read (regs
, regno
++, buf
);
1884 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1885 len
-= X_REGISTER_SIZE
;
1886 valbuf
+= X_REGISTER_SIZE
;
1892 /* Will a function return an aggregate type in memory or in a
1893 register? Return 0 if an aggregate type can be returned in a
1894 register, 1 if it must be returned in memory. */
1897 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
1899 type
= check_typedef (type
);
1901 if (is_hfa_or_hva (type
))
1903 /* v0-v7 are used to return values and one register is allocated
1904 for one member. However, HFA or HVA has at most four members. */
1908 if (TYPE_LENGTH (type
) > 16)
1910 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1911 invisible reference. */
1919 /* Write into appropriate registers a function return value of type
1920 TYPE, given in virtual format. */
1923 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
1924 const gdb_byte
*valbuf
)
1926 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1927 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1929 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1931 bfd_byte buf
[V_REGISTER_SIZE
];
1932 int len
= TYPE_LENGTH (type
);
1934 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
1935 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
1937 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1938 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1939 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1940 || TYPE_CODE (type
) == TYPE_CODE_PTR
1941 || TYPE_IS_REFERENCE (type
)
1942 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1944 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
1946 /* Values of one word or less are zero/sign-extended and
1948 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
1949 LONGEST val
= unpack_long (type
, valbuf
);
1951 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
1952 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
1956 /* Integral values greater than one word are stored in
1957 consecutive registers starting with r0. This will always
1958 be a multiple of the regiser size. */
1959 int len
= TYPE_LENGTH (type
);
1960 int regno
= AARCH64_X0_REGNUM
;
1964 regcache_cooked_write (regs
, regno
++, valbuf
);
1965 len
-= X_REGISTER_SIZE
;
1966 valbuf
+= X_REGISTER_SIZE
;
1970 else if (is_hfa_or_hva (type
))
1972 int elements
= TYPE_NFIELDS (type
);
1973 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1974 int len
= TYPE_LENGTH (member_type
);
1977 for (i
= 0; i
< elements
; i
++)
1979 int regno
= AARCH64_V0_REGNUM
+ i
;
1980 bfd_byte tmpbuf
[V_REGISTER_SIZE
];
1984 debug_printf ("write HFA or HVA return value element %d to %s\n",
1986 gdbarch_register_name (gdbarch
, regno
));
1989 memcpy (tmpbuf
, valbuf
, len
);
1990 regcache_cooked_write (regs
, regno
, tmpbuf
);
1994 else if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (type
)
1995 && (TYPE_LENGTH (type
) == 8 || TYPE_LENGTH (type
) == 16))
1998 gdb_byte buf
[V_REGISTER_SIZE
];
2000 memcpy (buf
, valbuf
, TYPE_LENGTH (type
));
2001 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2005 /* For a structure or union the behaviour is as if the value had
2006 been stored to word-aligned memory and then loaded into
2007 registers with 64-bit load instruction(s). */
2008 int len
= TYPE_LENGTH (type
);
2009 int regno
= AARCH64_X0_REGNUM
;
2010 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2014 memcpy (tmpbuf
, valbuf
,
2015 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2016 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2017 len
-= X_REGISTER_SIZE
;
2018 valbuf
+= X_REGISTER_SIZE
;
2023 /* Implement the "return_value" gdbarch method. */
2025 static enum return_value_convention
2026 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2027 struct type
*valtype
, struct regcache
*regcache
,
2028 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2031 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2032 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2033 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2035 if (aarch64_return_in_memory (gdbarch
, valtype
))
2038 debug_printf ("return value in memory\n");
2039 return RETURN_VALUE_STRUCT_CONVENTION
;
2044 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2047 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2050 debug_printf ("return value in registers\n");
2052 return RETURN_VALUE_REGISTER_CONVENTION
;
2055 /* Implement the "get_longjmp_target" gdbarch method. */
2058 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2061 gdb_byte buf
[X_REGISTER_SIZE
];
2062 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2063 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2064 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2066 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2068 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2072 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2076 /* Implement the "gen_return_address" gdbarch method. */
2079 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2080 struct agent_expr
*ax
, struct axs_value
*value
,
2083 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2084 value
->kind
= axs_lvalue_register
;
2085 value
->u
.reg
= AARCH64_LR_REGNUM
;
2089 /* Return the pseudo register name corresponding to register regnum. */
2092 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2094 static const char *const q_name
[] =
2096 "q0", "q1", "q2", "q3",
2097 "q4", "q5", "q6", "q7",
2098 "q8", "q9", "q10", "q11",
2099 "q12", "q13", "q14", "q15",
2100 "q16", "q17", "q18", "q19",
2101 "q20", "q21", "q22", "q23",
2102 "q24", "q25", "q26", "q27",
2103 "q28", "q29", "q30", "q31",
2106 static const char *const d_name
[] =
2108 "d0", "d1", "d2", "d3",
2109 "d4", "d5", "d6", "d7",
2110 "d8", "d9", "d10", "d11",
2111 "d12", "d13", "d14", "d15",
2112 "d16", "d17", "d18", "d19",
2113 "d20", "d21", "d22", "d23",
2114 "d24", "d25", "d26", "d27",
2115 "d28", "d29", "d30", "d31",
2118 static const char *const s_name
[] =
2120 "s0", "s1", "s2", "s3",
2121 "s4", "s5", "s6", "s7",
2122 "s8", "s9", "s10", "s11",
2123 "s12", "s13", "s14", "s15",
2124 "s16", "s17", "s18", "s19",
2125 "s20", "s21", "s22", "s23",
2126 "s24", "s25", "s26", "s27",
2127 "s28", "s29", "s30", "s31",
2130 static const char *const h_name
[] =
2132 "h0", "h1", "h2", "h3",
2133 "h4", "h5", "h6", "h7",
2134 "h8", "h9", "h10", "h11",
2135 "h12", "h13", "h14", "h15",
2136 "h16", "h17", "h18", "h19",
2137 "h20", "h21", "h22", "h23",
2138 "h24", "h25", "h26", "h27",
2139 "h28", "h29", "h30", "h31",
2142 static const char *const b_name
[] =
2144 "b0", "b1", "b2", "b3",
2145 "b4", "b5", "b6", "b7",
2146 "b8", "b9", "b10", "b11",
2147 "b12", "b13", "b14", "b15",
2148 "b16", "b17", "b18", "b19",
2149 "b20", "b21", "b22", "b23",
2150 "b24", "b25", "b26", "b27",
2151 "b28", "b29", "b30", "b31",
2154 regnum
-= gdbarch_num_regs (gdbarch
);
2156 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2157 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2159 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2160 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2162 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2163 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2165 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2166 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2168 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2169 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2171 internal_error (__FILE__
, __LINE__
,
2172 _("aarch64_pseudo_register_name: bad register number %d"),
2176 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2178 static struct type
*
2179 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2181 regnum
-= gdbarch_num_regs (gdbarch
);
2183 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2184 return aarch64_vnq_type (gdbarch
);
2186 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2187 return aarch64_vnd_type (gdbarch
);
2189 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2190 return aarch64_vns_type (gdbarch
);
2192 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2193 return aarch64_vnh_type (gdbarch
);
2195 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2196 return aarch64_vnb_type (gdbarch
);
2198 internal_error (__FILE__
, __LINE__
,
2199 _("aarch64_pseudo_register_type: bad register number %d"),
2203 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2206 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2207 struct reggroup
*group
)
2209 regnum
-= gdbarch_num_regs (gdbarch
);
2211 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2212 return group
== all_reggroup
|| group
== vector_reggroup
;
2213 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2214 return (group
== all_reggroup
|| group
== vector_reggroup
2215 || group
== float_reggroup
);
2216 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2217 return (group
== all_reggroup
|| group
== vector_reggroup
2218 || group
== float_reggroup
);
2219 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2220 return group
== all_reggroup
|| group
== vector_reggroup
;
2221 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2222 return group
== all_reggroup
|| group
== vector_reggroup
;
2224 return group
== all_reggroup
;
2227 /* Implement the "pseudo_register_read_value" gdbarch method. */
2229 static struct value
*
2230 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2231 struct regcache
*regcache
,
2234 gdb_byte reg_buf
[V_REGISTER_SIZE
];
2235 struct value
*result_value
;
2238 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2239 VALUE_LVAL (result_value
) = lval_register
;
2240 VALUE_REGNUM (result_value
) = regnum
;
2241 buf
= value_contents_raw (result_value
);
2243 regnum
-= gdbarch_num_regs (gdbarch
);
2245 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2247 enum register_status status
;
2250 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2251 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2252 if (status
!= REG_VALID
)
2253 mark_value_bytes_unavailable (result_value
, 0,
2254 TYPE_LENGTH (value_type (result_value
)));
2256 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2257 return result_value
;
2260 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2262 enum register_status status
;
2265 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2266 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2267 if (status
!= REG_VALID
)
2268 mark_value_bytes_unavailable (result_value
, 0,
2269 TYPE_LENGTH (value_type (result_value
)));
2271 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2272 return result_value
;
2275 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2277 enum register_status status
;
2280 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2281 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2282 if (status
!= REG_VALID
)
2283 mark_value_bytes_unavailable (result_value
, 0,
2284 TYPE_LENGTH (value_type (result_value
)));
2286 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2287 return result_value
;
2290 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2292 enum register_status status
;
2295 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2296 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2297 if (status
!= REG_VALID
)
2298 mark_value_bytes_unavailable (result_value
, 0,
2299 TYPE_LENGTH (value_type (result_value
)));
2301 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2302 return result_value
;
2305 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2307 enum register_status status
;
2310 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2311 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2312 if (status
!= REG_VALID
)
2313 mark_value_bytes_unavailable (result_value
, 0,
2314 TYPE_LENGTH (value_type (result_value
)));
2316 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2317 return result_value
;
2320 gdb_assert_not_reached ("regnum out of bound");
2323 /* Implement the "pseudo_register_write" gdbarch method. */
2326 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2327 int regnum
, const gdb_byte
*buf
)
2329 gdb_byte reg_buf
[V_REGISTER_SIZE
];
2331 /* Ensure the register buffer is zero, we want gdb writes of the
2332 various 'scalar' pseudo registers to behavior like architectural
2333 writes, register width bytes are written the remainder are set to
2335 memset (reg_buf
, 0, sizeof (reg_buf
));
2337 regnum
-= gdbarch_num_regs (gdbarch
);
2339 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2341 /* pseudo Q registers */
2344 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2345 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2346 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2350 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2352 /* pseudo D registers */
2355 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2356 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2357 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2361 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2365 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2366 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2367 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2371 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2373 /* pseudo H registers */
2376 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2377 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2378 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2382 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2384 /* pseudo B registers */
2387 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2388 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2389 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2393 gdb_assert_not_reached ("regnum out of bound");
2396 /* Callback function for user_reg_add. */
2398 static struct value
*
2399 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2401 const int *reg_p
= (const int *) baton
;
2403 return value_of_register (*reg_p
, frame
);
2407 /* Implement the "software_single_step" gdbarch method, needed to
2408 single step through atomic sequences on AArch64. */
2410 static std::vector
<CORE_ADDR
>
2411 aarch64_software_single_step (struct regcache
*regcache
)
2413 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2414 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2415 const int insn_size
= 4;
2416 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2417 CORE_ADDR pc
= regcache_read_pc (regcache
);
2418 CORE_ADDR breaks
[2] = { -1, -1 };
2420 CORE_ADDR closing_insn
= 0;
2421 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2422 byte_order_for_code
);
2425 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2426 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2429 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2432 /* Look for a Load Exclusive instruction which begins the sequence. */
2433 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2436 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2439 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2440 byte_order_for_code
);
2442 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2444 /* Check if the instruction is a conditional branch. */
2445 if (inst
.opcode
->iclass
== condbranch
)
2447 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2449 if (bc_insn_count
>= 1)
2452 /* It is, so we'll try to set a breakpoint at the destination. */
2453 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2459 /* Look for the Store Exclusive which closes the atomic sequence. */
2460 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2467 /* We didn't find a closing Store Exclusive instruction, fall back. */
2471 /* Insert breakpoint after the end of the atomic sequence. */
2472 breaks
[0] = loc
+ insn_size
;
2474 /* Check for duplicated breakpoints, and also check that the second
2475 breakpoint is not within the atomic sequence. */
2477 && (breaks
[1] == breaks
[0]
2478 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2479 last_breakpoint
= 0;
2481 std::vector
<CORE_ADDR
> next_pcs
;
2483 /* Insert the breakpoint at the end of the sequence, and one at the
2484 destination of the conditional branch, if it exists. */
2485 for (index
= 0; index
<= last_breakpoint
; index
++)
2486 next_pcs
.push_back (breaks
[index
]);
2491 struct displaced_step_closure
2493 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2494 is being displaced stepping. */
2497 /* PC adjustment offset after displaced stepping. */
2501 /* Data when visiting instructions for displaced stepping. */
2503 struct aarch64_displaced_step_data
2505 struct aarch64_insn_data base
;
2507 /* The address where the instruction will be executed at. */
2509 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2510 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2511 /* Number of instructions in INSN_BUF. */
2512 unsigned insn_count
;
2513 /* Registers when doing displaced stepping. */
2514 struct regcache
*regs
;
2516 struct displaced_step_closure
*dsc
;
2519 /* Implementation of aarch64_insn_visitor method "b". */
2522 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2523 struct aarch64_insn_data
*data
)
2525 struct aarch64_displaced_step_data
*dsd
2526 = (struct aarch64_displaced_step_data
*) data
;
2527 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2529 if (can_encode_int32 (new_offset
, 28))
2531 /* Emit B rather than BL, because executing BL on a new address
2532 will get the wrong address into LR. In order to avoid this,
2533 we emit B, and update LR if the instruction is BL. */
2534 emit_b (dsd
->insn_buf
, 0, new_offset
);
2540 emit_nop (dsd
->insn_buf
);
2542 dsd
->dsc
->pc_adjust
= offset
;
2548 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2549 data
->insn_addr
+ 4);
2553 /* Implementation of aarch64_insn_visitor method "b_cond". */
2556 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2557 struct aarch64_insn_data
*data
)
2559 struct aarch64_displaced_step_data
*dsd
2560 = (struct aarch64_displaced_step_data
*) data
;
2562 /* GDB has to fix up PC after displaced step this instruction
2563 differently according to the condition is true or false. Instead
2564 of checking COND against conditional flags, we can use
2565 the following instructions, and GDB can tell how to fix up PC
2566 according to the PC value.
2568 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2574 emit_bcond (dsd
->insn_buf
, cond
, 8);
2576 dsd
->dsc
->pc_adjust
= offset
;
2577 dsd
->insn_count
= 1;
2580 /* Dynamically allocate a new register. If we know the register
2581 statically, we should make it a global as above instead of using this
2584 static struct aarch64_register
2585 aarch64_register (unsigned num
, int is64
)
2587 return (struct aarch64_register
) { num
, is64
};
2590 /* Implementation of aarch64_insn_visitor method "cb". */
2593 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2594 const unsigned rn
, int is64
,
2595 struct aarch64_insn_data
*data
)
2597 struct aarch64_displaced_step_data
*dsd
2598 = (struct aarch64_displaced_step_data
*) data
;
2600 /* The offset is out of range for a compare and branch
2601 instruction. We can use the following instructions instead:
2603 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2608 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2609 dsd
->insn_count
= 1;
2611 dsd
->dsc
->pc_adjust
= offset
;
2614 /* Implementation of aarch64_insn_visitor method "tb". */
2617 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2618 const unsigned rt
, unsigned bit
,
2619 struct aarch64_insn_data
*data
)
2621 struct aarch64_displaced_step_data
*dsd
2622 = (struct aarch64_displaced_step_data
*) data
;
2624 /* The offset is out of range for a test bit and branch
2625 instruction We can use the following instructions instead:
2627 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2633 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2634 dsd
->insn_count
= 1;
2636 dsd
->dsc
->pc_adjust
= offset
;
2639 /* Implementation of aarch64_insn_visitor method "adr". */
2642 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2643 const int is_adrp
, struct aarch64_insn_data
*data
)
2645 struct aarch64_displaced_step_data
*dsd
2646 = (struct aarch64_displaced_step_data
*) data
;
2647 /* We know exactly the address the ADR{P,} instruction will compute.
2648 We can just write it to the destination register. */
2649 CORE_ADDR address
= data
->insn_addr
+ offset
;
2653 /* Clear the lower 12 bits of the offset to get the 4K page. */
2654 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2658 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2661 dsd
->dsc
->pc_adjust
= 4;
2662 emit_nop (dsd
->insn_buf
);
2663 dsd
->insn_count
= 1;
2666 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2669 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2670 const unsigned rt
, const int is64
,
2671 struct aarch64_insn_data
*data
)
2673 struct aarch64_displaced_step_data
*dsd
2674 = (struct aarch64_displaced_step_data
*) data
;
2675 CORE_ADDR address
= data
->insn_addr
+ offset
;
2676 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2678 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2682 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2683 aarch64_register (rt
, 1), zero
);
2685 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2686 aarch64_register (rt
, 1), zero
);
2688 dsd
->dsc
->pc_adjust
= 4;
2691 /* Implementation of aarch64_insn_visitor method "others". */
2694 aarch64_displaced_step_others (const uint32_t insn
,
2695 struct aarch64_insn_data
*data
)
2697 struct aarch64_displaced_step_data
*dsd
2698 = (struct aarch64_displaced_step_data
*) data
;
2700 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2701 dsd
->insn_count
= 1;
2703 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2706 dsd
->dsc
->pc_adjust
= 0;
2709 dsd
->dsc
->pc_adjust
= 4;
2712 static const struct aarch64_insn_visitor visitor
=
2714 aarch64_displaced_step_b
,
2715 aarch64_displaced_step_b_cond
,
2716 aarch64_displaced_step_cb
,
2717 aarch64_displaced_step_tb
,
2718 aarch64_displaced_step_adr
,
2719 aarch64_displaced_step_ldr_literal
,
2720 aarch64_displaced_step_others
,
2723 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2725 struct displaced_step_closure
*
2726 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2727 CORE_ADDR from
, CORE_ADDR to
,
2728 struct regcache
*regs
)
2730 struct displaced_step_closure
*dsc
= NULL
;
2731 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2732 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2733 struct aarch64_displaced_step_data dsd
;
2736 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2739 /* Look for a Load Exclusive instruction which begins the sequence. */
2740 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2742 /* We can't displaced step atomic sequences. */
2746 dsc
= XCNEW (struct displaced_step_closure
);
2747 dsd
.base
.insn_addr
= from
;
2752 aarch64_relocate_instruction (insn
, &visitor
,
2753 (struct aarch64_insn_data
*) &dsd
);
2754 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2756 if (dsd
.insn_count
!= 0)
2760 /* Instruction can be relocated to scratch pad. Copy
2761 relocated instruction(s) there. */
2762 for (i
= 0; i
< dsd
.insn_count
; i
++)
2764 if (debug_displaced
)
2766 debug_printf ("displaced: writing insn ");
2767 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2768 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2770 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2771 (ULONGEST
) dsd
.insn_buf
[i
]);
2783 /* Implement the "displaced_step_fixup" gdbarch method. */
2786 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2787 struct displaced_step_closure
*dsc
,
2788 CORE_ADDR from
, CORE_ADDR to
,
2789 struct regcache
*regs
)
2795 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2798 /* Condition is true. */
2800 else if (pc
- to
== 4)
2802 /* Condition is false. */
2806 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2809 if (dsc
->pc_adjust
!= 0)
2811 if (debug_displaced
)
2813 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2814 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2816 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2817 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Always use hardware single-stepping to execute the relocated
   instruction(s) in the scratch pad.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2830 /* Initialize the current architecture based on INFO. If possible,
2831 re-use an architecture from ARCHES, which is a list of
2832 architectures already created during this debugging session.
2834 Called e.g. at program startup, when reading a core file, and when
2835 reading a binary file. */
2837 static struct gdbarch
*
2838 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2840 struct gdbarch_tdep
*tdep
;
2841 struct gdbarch
*gdbarch
;
2842 struct gdbarch_list
*best_arch
;
2843 struct tdesc_arch_data
*tdesc_data
= NULL
;
2844 const struct target_desc
*tdesc
= info
.target_desc
;
2847 const struct tdesc_feature
*feature
;
2849 int num_pseudo_regs
= 0;
2851 /* Ensure we always have a target descriptor. */
2852 if (!tdesc_has_registers (tdesc
))
2853 tdesc
= tdesc_aarch64
;
2857 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2859 if (feature
== NULL
)
2862 tdesc_data
= tdesc_data_alloc ();
2864 /* Validate the descriptor provides the mandatory core R registers
2865 and allocate their numbers. */
2866 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2868 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2869 aarch64_r_register_names
[i
]);
2871 num_regs
= AARCH64_X0_REGNUM
+ i
;
2873 /* Look for the V registers. */
2874 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2877 /* Validate the descriptor provides the mandatory V registers
2878 and allocate their numbers. */
2879 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2881 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2882 aarch64_v_register_names
[i
]);
2884 num_regs
= AARCH64_V0_REGNUM
+ i
;
2886 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2887 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2888 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2889 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2890 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2895 tdesc_data_cleanup (tdesc_data
);
2899 /* AArch64 code is always little-endian. */
2900 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2902 /* If there is already a candidate, use it. */
2903 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2905 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2907 /* Found a match. */
2911 if (best_arch
!= NULL
)
2913 if (tdesc_data
!= NULL
)
2914 tdesc_data_cleanup (tdesc_data
);
2915 return best_arch
->gdbarch
;
2918 tdep
= XCNEW (struct gdbarch_tdep
);
2919 gdbarch
= gdbarch_alloc (&info
, tdep
);
2921 /* This should be low enough for everything. */
2922 tdep
->lowest_pc
= 0x20;
2923 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2924 tdep
->jb_elt_size
= 8;
2926 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2927 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2929 /* Frame handling. */
2930 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2931 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2932 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2934 /* Advance PC across function entry code. */
2935 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2937 /* The stack grows downward. */
2938 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2940 /* Breakpoint manipulation. */
2941 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
2942 aarch64_breakpoint::kind_from_pc
);
2943 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
2944 aarch64_breakpoint::bp_from_kind
);
2945 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2946 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2948 /* Information about registers, etc. */
2949 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2950 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2951 set_gdbarch_num_regs (gdbarch
, num_regs
);
2953 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2954 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2955 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2956 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2957 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2958 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2959 aarch64_pseudo_register_reggroup_p
);
2962 set_gdbarch_short_bit (gdbarch
, 16);
2963 set_gdbarch_int_bit (gdbarch
, 32);
2964 set_gdbarch_float_bit (gdbarch
, 32);
2965 set_gdbarch_double_bit (gdbarch
, 64);
2966 set_gdbarch_long_double_bit (gdbarch
, 128);
2967 set_gdbarch_long_bit (gdbarch
, 64);
2968 set_gdbarch_long_long_bit (gdbarch
, 64);
2969 set_gdbarch_ptr_bit (gdbarch
, 64);
2970 set_gdbarch_char_signed (gdbarch
, 0);
2971 set_gdbarch_wchar_signed (gdbarch
, 0);
2972 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2973 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2974 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2976 /* Internal <-> external register number maps. */
2977 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2979 /* Returning results. */
2980 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2983 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2985 /* Virtual tables. */
2986 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2988 /* Hook in the ABI-specific overrides, if they have been registered. */
2989 info
.target_desc
= tdesc
;
2990 info
.tdesc_data
= tdesc_data
;
2991 gdbarch_init_osabi (info
, gdbarch
);
2993 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2995 /* Add some default predicates. */
2996 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2997 dwarf2_append_unwinders (gdbarch
);
2998 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3000 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3002 /* Now we have tuned the configuration, set a few final things,
3003 based on what the OS ABI has told us. */
3005 if (tdep
->jb_pc
>= 0)
3006 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3008 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3010 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3012 /* Add standard register aliases. */
3013 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3014 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3015 value_of_aarch64_user_reg
,
3016 &aarch64_register_aliases
[i
].regnum
);
3022 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3024 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3029 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3030 paddress (gdbarch
, tdep
->lowest_pc
));
3036 static void aarch64_process_record_test (void);
3041 _initialize_aarch64_tdep (void)
3043 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3046 initialize_tdesc_aarch64 ();
3048 /* Debug this file's internals. */
3049 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3050 Set AArch64 debugging."), _("\
3051 Show AArch64 debugging."), _("\
3052 When on, AArch64 specific debugging is enabled."),
3055 &setdebuglist
, &showdebuglist
);
3058 selftests::register_test ("aarch64-analyze-prologue",
3059 selftests::aarch64_analyze_prologue_test
);
3060 selftests::register_test ("aarch64-process-record",
3061 selftests::aarch64_process_record_test
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate and fill the record's register-number array.  No-op when
   LENGTH is zero; otherwise REGS takes ownership of a fresh uint32_t
   array copied from RECORD_BUF.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)

/* Allocate and fill the record's memory-record array.  RECORD_BUF is
   laid out as len/addr pairs matching struct aarch64_mem_r.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy(&MEMS->len, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)
3092 /* AArch64 record/replay structures and enumerations. */
3094 struct aarch64_mem_r
3096 uint64_t len
; /* Record length. */
3097 uint64_t addr
; /* Memory address. */
3100 enum aarch64_record_result
3102 AARCH64_RECORD_SUCCESS
,
3103 AARCH64_RECORD_UNSUPPORTED
,
3104 AARCH64_RECORD_UNKNOWN
3107 typedef struct insn_decode_record_t
3109 struct gdbarch
*gdbarch
;
3110 struct regcache
*regcache
;
3111 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3112 uint32_t aarch64_insn
; /* Insn to be recorded. */
3113 uint32_t mem_rec_count
; /* Count of memory records. */
3114 uint32_t reg_rec_count
; /* Count of register records. */
3115 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3116 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3117 } insn_decode_record
;
3119 /* Record handler for data processing - register instructions. */
3122 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3124 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3125 uint32_t record_buf
[4];
3127 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3128 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3129 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3131 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3135 /* Logical (shifted register). */
3136 if (insn_bits24_27
== 0x0a)
3137 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3139 else if (insn_bits24_27
== 0x0b)
3140 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3142 return AARCH64_RECORD_UNKNOWN
;
3144 record_buf
[0] = reg_rd
;
3145 aarch64_insn_r
->reg_rec_count
= 1;
3147 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3151 if (insn_bits24_27
== 0x0b)
3153 /* Data-processing (3 source). */
3154 record_buf
[0] = reg_rd
;
3155 aarch64_insn_r
->reg_rec_count
= 1;
3157 else if (insn_bits24_27
== 0x0a)
3159 if (insn_bits21_23
== 0x00)
3161 /* Add/subtract (with carry). */
3162 record_buf
[0] = reg_rd
;
3163 aarch64_insn_r
->reg_rec_count
= 1;
3164 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3166 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3167 aarch64_insn_r
->reg_rec_count
= 2;
3170 else if (insn_bits21_23
== 0x02)
3172 /* Conditional compare (register) and conditional compare
3173 (immediate) instructions. */
3174 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3175 aarch64_insn_r
->reg_rec_count
= 1;
3177 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3179 /* CConditional select. */
3180 /* Data-processing (2 source). */
3181 /* Data-processing (1 source). */
3182 record_buf
[0] = reg_rd
;
3183 aarch64_insn_r
->reg_rec_count
= 1;
3186 return AARCH64_RECORD_UNKNOWN
;
3190 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3192 return AARCH64_RECORD_SUCCESS
;
3195 /* Record handler for data processing - immediate instructions. */
3198 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3200 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3201 uint32_t record_buf
[4];
3203 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3204 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3205 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3207 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3208 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3209 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3211 record_buf
[0] = reg_rd
;
3212 aarch64_insn_r
->reg_rec_count
= 1;
3214 else if (insn_bits24_27
== 0x01)
3216 /* Add/Subtract (immediate). */
3217 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3218 record_buf
[0] = reg_rd
;
3219 aarch64_insn_r
->reg_rec_count
= 1;
3221 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3223 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3225 /* Logical (immediate). */
3226 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3227 record_buf
[0] = reg_rd
;
3228 aarch64_insn_r
->reg_rec_count
= 1;
3230 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3233 return AARCH64_RECORD_UNKNOWN
;
3235 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3237 return AARCH64_RECORD_SUCCESS
;
3240 /* Record handler for branch, exception generation and system instructions. */
3243 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3245 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3246 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3247 uint32_t record_buf
[4];
3249 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3250 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3251 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3253 if (insn_bits28_31
== 0x0d)
3255 /* Exception generation instructions. */
3256 if (insn_bits24_27
== 0x04)
3258 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3259 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3260 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3262 ULONGEST svc_number
;
3264 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3266 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3270 return AARCH64_RECORD_UNSUPPORTED
;
3272 /* System instructions. */
3273 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3275 uint32_t reg_rt
, reg_crn
;
3277 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3278 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3280 /* Record rt in case of sysl and mrs instructions. */
3281 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3283 record_buf
[0] = reg_rt
;
3284 aarch64_insn_r
->reg_rec_count
= 1;
3286 /* Record cpsr for hint and msr(immediate) instructions. */
3287 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3289 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3290 aarch64_insn_r
->reg_rec_count
= 1;
3293 /* Unconditional branch (register). */
3294 else if((insn_bits24_27
& 0x0e) == 0x06)
3296 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3297 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3298 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3301 return AARCH64_RECORD_UNKNOWN
;
3303 /* Unconditional branch (immediate). */
3304 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3306 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3307 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3308 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3311 /* Compare & branch (immediate), Test & branch (immediate) and
3312 Conditional branch (immediate). */
3313 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3315 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3317 return AARCH64_RECORD_SUCCESS
;
3320 /* Record handler for advanced SIMD load and store instructions. */
3323 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3326 uint64_t addr_offset
= 0;
3327 uint32_t record_buf
[24];
3328 uint64_t record_buf_mem
[24];
3329 uint32_t reg_rn
, reg_rt
;
3330 uint32_t reg_index
= 0, mem_index
= 0;
3331 uint8_t opcode_bits
, size_bits
;
3333 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3334 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3335 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3336 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3337 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3340 debug_printf ("Process record: Advanced SIMD load/store\n");
3342 /* Load/store single structure. */
3343 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3345 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3346 scale
= opcode_bits
>> 2;
3347 selem
= ((opcode_bits
& 0x02) |
3348 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3352 if (size_bits
& 0x01)
3353 return AARCH64_RECORD_UNKNOWN
;
3356 if ((size_bits
>> 1) & 0x01)
3357 return AARCH64_RECORD_UNKNOWN
;
3358 if (size_bits
& 0x01)
3360 if (!((opcode_bits
>> 1) & 0x01))
3363 return AARCH64_RECORD_UNKNOWN
;
3367 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3374 return AARCH64_RECORD_UNKNOWN
;
3380 for (sindex
= 0; sindex
< selem
; sindex
++)
3382 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3383 reg_rt
= (reg_rt
+ 1) % 32;
3387 for (sindex
= 0; sindex
< selem
; sindex
++)
3389 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3390 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3393 record_buf_mem
[mem_index
++] = esize
/ 8;
3394 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3396 addr_offset
= addr_offset
+ (esize
/ 8);
3397 reg_rt
= (reg_rt
+ 1) % 32;
3401 /* Load/store multiple structure. */
3404 uint8_t selem
, esize
, rpt
, elements
;
3405 uint8_t eindex
, rindex
;
3407 esize
= 8 << size_bits
;
3408 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3409 elements
= 128 / esize
;
3411 elements
= 64 / esize
;
3413 switch (opcode_bits
)
3415 /*LD/ST4 (4 Registers). */
3420 /*LD/ST1 (4 Registers). */
3425 /*LD/ST3 (3 Registers). */
3430 /*LD/ST1 (3 Registers). */
3435 /*LD/ST1 (1 Register). */
3440 /*LD/ST2 (2 Registers). */
3445 /*LD/ST1 (2 Registers). */
3451 return AARCH64_RECORD_UNSUPPORTED
;
3454 for (rindex
= 0; rindex
< rpt
; rindex
++)
3455 for (eindex
= 0; eindex
< elements
; eindex
++)
3457 uint8_t reg_tt
, sindex
;
3458 reg_tt
= (reg_rt
+ rindex
) % 32;
3459 for (sindex
= 0; sindex
< selem
; sindex
++)
3461 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3462 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3465 record_buf_mem
[mem_index
++] = esize
/ 8;
3466 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3468 addr_offset
= addr_offset
+ (esize
/ 8);
3469 reg_tt
= (reg_tt
+ 1) % 32;
3474 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3475 record_buf
[reg_index
++] = reg_rn
;
3477 aarch64_insn_r
->reg_rec_count
= reg_index
;
3478 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3479 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3481 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3483 return AARCH64_RECORD_SUCCESS
;
3486 /* Record handler for load and store instructions. */
3489 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3491 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3492 uint8_t insn_bit23
, insn_bit21
;
3493 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3494 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3495 uint64_t datasize
, offset
;
3496 uint32_t record_buf
[8];
3497 uint64_t record_buf_mem
[8];
3500 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3501 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3502 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3503 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3504 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3505 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3506 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3507 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3508 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3509 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3510 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3512 /* Load/store exclusive. */
3513 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3516 debug_printf ("Process record: load/store exclusive\n");
3520 record_buf
[0] = reg_rt
;
3521 aarch64_insn_r
->reg_rec_count
= 1;
3524 record_buf
[1] = reg_rt2
;
3525 aarch64_insn_r
->reg_rec_count
= 2;
3531 datasize
= (8 << size_bits
) * 2;
3533 datasize
= (8 << size_bits
);
3534 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3536 record_buf_mem
[0] = datasize
/ 8;
3537 record_buf_mem
[1] = address
;
3538 aarch64_insn_r
->mem_rec_count
= 1;
3541 /* Save register rs. */
3542 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3543 aarch64_insn_r
->reg_rec_count
= 1;
3547 /* Load register (literal) instructions decoding. */
3548 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3551 debug_printf ("Process record: load register (literal)\n");
3553 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3555 record_buf
[0] = reg_rt
;
3556 aarch64_insn_r
->reg_rec_count
= 1;
3558 /* All types of load/store pair instructions decoding. */
3559 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3562 debug_printf ("Process record: load/store pair\n");
3568 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3569 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3573 record_buf
[0] = reg_rt
;
3574 record_buf
[1] = reg_rt2
;
3576 aarch64_insn_r
->reg_rec_count
= 2;
3581 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3583 size_bits
= size_bits
>> 1;
3584 datasize
= 8 << (2 + size_bits
);
3585 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3586 offset
= offset
<< (2 + size_bits
);
3587 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3589 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3591 if (imm7_off
& 0x40)
3592 address
= address
- offset
;
3594 address
= address
+ offset
;
3597 record_buf_mem
[0] = datasize
/ 8;
3598 record_buf_mem
[1] = address
;
3599 record_buf_mem
[2] = datasize
/ 8;
3600 record_buf_mem
[3] = address
+ (datasize
/ 8);
3601 aarch64_insn_r
->mem_rec_count
= 2;
3603 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3604 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3606 /* Load/store register (unsigned immediate) instructions. */
3607 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3609 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3619 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
3621 /* PRFM (immediate) */
3622 return AARCH64_RECORD_SUCCESS
;
3624 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
3626 /* LDRSW (immediate) */
3640 debug_printf ("Process record: load/store (unsigned immediate):"
3641 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3647 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3648 datasize
= 8 << size_bits
;
3649 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3651 offset
= offset
<< size_bits
;
3652 address
= address
+ offset
;
3654 record_buf_mem
[0] = datasize
>> 3;
3655 record_buf_mem
[1] = address
;
3656 aarch64_insn_r
->mem_rec_count
= 1;
3661 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3663 record_buf
[0] = reg_rt
;
3664 aarch64_insn_r
->reg_rec_count
= 1;
3667 /* Load/store register (register offset) instructions. */
3668 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3669 && insn_bits10_11
== 0x02 && insn_bit21
)
3672 debug_printf ("Process record: load/store (register offset)\n");
3673 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3680 if (size_bits
!= 0x03)
3683 return AARCH64_RECORD_UNKNOWN
;
3687 ULONGEST reg_rm_val
;
3689 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3690 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3691 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3692 offset
= reg_rm_val
<< size_bits
;
3694 offset
= reg_rm_val
;
3695 datasize
= 8 << size_bits
;
3696 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3698 address
= address
+ offset
;
3699 record_buf_mem
[0] = datasize
>> 3;
3700 record_buf_mem
[1] = address
;
3701 aarch64_insn_r
->mem_rec_count
= 1;
3706 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3708 record_buf
[0] = reg_rt
;
3709 aarch64_insn_r
->reg_rec_count
= 1;
3712 /* Load/store register (immediate and unprivileged) instructions. */
3713 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3718 debug_printf ("Process record: load/store "
3719 "(immediate and unprivileged)\n");
3721 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3728 if (size_bits
!= 0x03)
3731 return AARCH64_RECORD_UNKNOWN
;
3736 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3737 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3738 datasize
= 8 << size_bits
;
3739 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3741 if (insn_bits10_11
!= 0x01)
3743 if (imm9_off
& 0x0100)
3744 address
= address
- offset
;
3746 address
= address
+ offset
;
3748 record_buf_mem
[0] = datasize
>> 3;
3749 record_buf_mem
[1] = address
;
3750 aarch64_insn_r
->mem_rec_count
= 1;
3755 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3757 record_buf
[0] = reg_rt
;
3758 aarch64_insn_r
->reg_rec_count
= 1;
3760 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3761 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3763 /* Advanced SIMD load/store instructions. */
3765 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3767 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3769 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3771 return AARCH64_RECORD_SUCCESS
;
3774 /* Record handler for data processing SIMD and floating point instructions. */
3777 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3779 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3780 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3781 uint8_t insn_bits11_14
;
3782 uint32_t record_buf
[2];
3784 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3785 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3786 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3787 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3788 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3789 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3790 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3791 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3792 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3795 debug_printf ("Process record: data processing SIMD/FP: ");
3797 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3799 /* Floating point - fixed point conversion instructions. */
3803 debug_printf ("FP - fixed point conversion");
3805 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3806 record_buf
[0] = reg_rd
;
3808 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3810 /* Floating point - conditional compare instructions. */
3811 else if (insn_bits10_11
== 0x01)
3814 debug_printf ("FP - conditional compare");
3816 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3818 /* Floating point - data processing (2-source) and
3819 conditional select instructions. */
3820 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3823 debug_printf ("FP - DP (2-source)");
3825 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3827 else if (insn_bits10_11
== 0x00)
3829 /* Floating point - immediate instructions. */
3830 if ((insn_bits12_15
& 0x01) == 0x01
3831 || (insn_bits12_15
& 0x07) == 0x04)
3834 debug_printf ("FP - immediate");
3835 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3837 /* Floating point - compare instructions. */
3838 else if ((insn_bits12_15
& 0x03) == 0x02)
3841 debug_printf ("FP - immediate");
3842 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3844 /* Floating point - integer conversions instructions. */
3845 else if (insn_bits12_15
== 0x00)
3847 /* Convert float to integer instruction. */
3848 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3851 debug_printf ("float to int conversion");
3853 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3855 /* Convert integer to float instruction. */
3856 else if ((opcode
>> 1) == 0x01 && !rmode
)
3859 debug_printf ("int to float conversion");
3861 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3863 /* Move float to integer instruction. */
3864 else if ((opcode
>> 1) == 0x03)
3867 debug_printf ("move float to int");
3869 if (!(opcode
& 0x01))
3870 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3872 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3875 return AARCH64_RECORD_UNKNOWN
;
3878 return AARCH64_RECORD_UNKNOWN
;
3881 return AARCH64_RECORD_UNKNOWN
;
3883 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3886 debug_printf ("SIMD copy");
3888 /* Advanced SIMD copy instructions. */
3889 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3890 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3891 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3893 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3894 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3896 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3899 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3901 /* All remaining floating point or advanced SIMD instructions. */
3905 debug_printf ("all remain");
3907 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3911 debug_printf ("\n");
3913 aarch64_insn_r
->reg_rec_count
++;
3914 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3915 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3917 return AARCH64_RECORD_SUCCESS
;
3920 /* Decodes insns type and invokes its record handler. */
3923 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3925 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3927 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3928 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3929 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3930 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3932 /* Data processing - immediate instructions. */
3933 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3934 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3936 /* Branch, exception generation and system instructions. */
3937 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3938 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3940 /* Load and store instructions. */
3941 if (!ins_bit25
&& ins_bit27
)
3942 return aarch64_record_load_store (aarch64_insn_r
);
3944 /* Data processing - register instructions. */
3945 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3946 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3948 /* Data processing - SIMD and floating point instructions. */
3949 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3950 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3952 return AARCH64_RECORD_UNSUPPORTED
;
3955 /* Cleans up local record registers and memory allocations. */
3958 deallocate_reg_mem (insn_decode_record
*record
)
3960 xfree (record
->aarch64_regs
);
3961 xfree (record
->aarch64_mems
);
3965 namespace selftests
{
3968 aarch64_process_record_test (void)
3970 struct gdbarch_info info
;
3973 gdbarch_info_init (&info
);
3974 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
3976 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
3977 SELF_CHECK (gdbarch
!= NULL
);
3979 insn_decode_record aarch64_record
;
3981 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3982 aarch64_record
.regcache
= NULL
;
3983 aarch64_record
.this_addr
= 0;
3984 aarch64_record
.gdbarch
= gdbarch
;
3986 /* 20 00 80 f9 prfm pldl1keep, [x1] */
3987 aarch64_record
.aarch64_insn
= 0xf9800020;
3988 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3989 SELF_CHECK (ret
== AARCH64_RECORD_SUCCESS
);
3990 SELF_CHECK (aarch64_record
.reg_rec_count
== 0);
3991 SELF_CHECK (aarch64_record
.mem_rec_count
== 0);
3993 deallocate_reg_mem (&aarch64_record
);
3996 } // namespace selftests
3997 #endif /* GDB_SELF_TEST */
3999 /* Parse the current instruction and record the values of the registers and
4000 memory that will be changed in current instruction to record_arch_list
4001 return -1 if something is wrong. */
4004 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4005 CORE_ADDR insn_addr
)
4007 uint32_t rec_no
= 0;
4008 uint8_t insn_size
= 4;
4010 gdb_byte buf
[insn_size
];
4011 insn_decode_record aarch64_record
;
4013 memset (&buf
[0], 0, insn_size
);
4014 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4015 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4016 aarch64_record
.aarch64_insn
4017 = (uint32_t) extract_unsigned_integer (&buf
[0],
4019 gdbarch_byte_order (gdbarch
));
4020 aarch64_record
.regcache
= regcache
;
4021 aarch64_record
.this_addr
= insn_addr
;
4022 aarch64_record
.gdbarch
= gdbarch
;
4024 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4025 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4027 printf_unfiltered (_("Process record does not support instruction "
4028 "0x%0x at address %s.\n"),
4029 aarch64_record
.aarch64_insn
,
4030 paddress (gdbarch
, insn_addr
));
4036 /* Record registers. */
4037 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4039 /* Always record register CPSR. */
4040 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4041 AARCH64_CPSR_REGNUM
);
4042 if (aarch64_record
.aarch64_regs
)
4043 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4044 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4045 aarch64_record
.aarch64_regs
[rec_no
]))
4048 /* Record memories. */
4049 if (aarch64_record
.aarch64_mems
)
4050 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4051 if (record_full_arch_list_add_mem
4052 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4053 aarch64_record
.aarch64_mems
[rec_no
].len
))
4056 if (record_full_arch_list_add_end ())
4060 deallocate_reg_mem (&aarch64_record
);