1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
49 #include "aarch64-tdep.h"
52 #include "elf/aarch64.h"
57 #include "record-full.h"
59 #include "features/aarch64.c"
61 #include "arch/aarch64-insn.h"
63 #include "opcode/aarch64.h"
/* Bit-field extraction helpers for decoding instruction words.

   submask (x)      -- mask covering bits [0, x] inclusive.
   bit (obj, st)    -- the single bit of OBJ at position ST (0 or 1).
   bits (obj, st, fn) -- bits [st, fn] of OBJ, shifted down to bit 0.

   Note: SUBMASK's shift is done in `long`; callers must keep
   (x + 1) below the width of long to avoid undefined behavior.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Each family of pseudo registers
   (Q, D, S, H, B views of the V registers) occupies a consecutive
   run of 32 numbers starting at these offsets.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
77 /* The standard register names, and all the valid aliases for them. */
80 const char *const name
;
82 } aarch64_register_aliases
[] =
84 /* 64-bit register names. */
85 {"fp", AARCH64_FP_REGNUM
},
86 {"lr", AARCH64_LR_REGNUM
},
87 {"sp", AARCH64_SP_REGNUM
},
89 /* 32-bit register names. */
90 {"w0", AARCH64_X0_REGNUM
+ 0},
91 {"w1", AARCH64_X0_REGNUM
+ 1},
92 {"w2", AARCH64_X0_REGNUM
+ 2},
93 {"w3", AARCH64_X0_REGNUM
+ 3},
94 {"w4", AARCH64_X0_REGNUM
+ 4},
95 {"w5", AARCH64_X0_REGNUM
+ 5},
96 {"w6", AARCH64_X0_REGNUM
+ 6},
97 {"w7", AARCH64_X0_REGNUM
+ 7},
98 {"w8", AARCH64_X0_REGNUM
+ 8},
99 {"w9", AARCH64_X0_REGNUM
+ 9},
100 {"w10", AARCH64_X0_REGNUM
+ 10},
101 {"w11", AARCH64_X0_REGNUM
+ 11},
102 {"w12", AARCH64_X0_REGNUM
+ 12},
103 {"w13", AARCH64_X0_REGNUM
+ 13},
104 {"w14", AARCH64_X0_REGNUM
+ 14},
105 {"w15", AARCH64_X0_REGNUM
+ 15},
106 {"w16", AARCH64_X0_REGNUM
+ 16},
107 {"w17", AARCH64_X0_REGNUM
+ 17},
108 {"w18", AARCH64_X0_REGNUM
+ 18},
109 {"w19", AARCH64_X0_REGNUM
+ 19},
110 {"w20", AARCH64_X0_REGNUM
+ 20},
111 {"w21", AARCH64_X0_REGNUM
+ 21},
112 {"w22", AARCH64_X0_REGNUM
+ 22},
113 {"w23", AARCH64_X0_REGNUM
+ 23},
114 {"w24", AARCH64_X0_REGNUM
+ 24},
115 {"w25", AARCH64_X0_REGNUM
+ 25},
116 {"w26", AARCH64_X0_REGNUM
+ 26},
117 {"w27", AARCH64_X0_REGNUM
+ 27},
118 {"w28", AARCH64_X0_REGNUM
+ 28},
119 {"w29", AARCH64_X0_REGNUM
+ 29},
120 {"w30", AARCH64_X0_REGNUM
+ 30},
123 {"ip0", AARCH64_X0_REGNUM
+ 16},
124 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  /* NOTE(review): the mangled source cuts off after "sp"; upstream GDB
     continues with "pc", "cpsr" — confirm against the target
     description before relying on the tail entries.  */
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  /* NOTE(review): the mangled source cuts off after "v31"; upstream GDB
     continues with the FP status/control registers — confirm.  */
  "fpsr",
  "fpcr"
};
160 /* AArch64 prologue cache structure. */
161 struct aarch64_prologue_cache
163 /* The program counter at the start of the function. It is used to
164 identify this frame as a prologue frame. */
167 /* The program counter at the time this frame was created; i.e. where
168 this function was called from. It is used to identify this frame as a
172 /* The stack pointer at the time this frame was created; i.e. the
173 caller's stack pointer when this function was called. It is used
174 to identify this frame. */
177 /* Is the target available to read from? */
180 /* The frame base for this frame is just prev_sp - frame size.
181 FRAMESIZE is the distance from the frame pointer to the
182 initial stack pointer. */
185 /* The register used to hold the frame pointer for this frame. */
188 /* Saved register offsets. */
189 struct trad_frame_saved_reg
*saved_regs
;
193 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
194 struct cmd_list_element
*c
, const char *value
)
196 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
199 /* Abstract instruction reader. */
201 class abstract_instruction_reader
204 /* Read in one instruction. */
205 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
206 enum bfd_endian byte_order
) = 0;
209 /* Instruction reader from real target. */
211 class instruction_reader
: public abstract_instruction_reader
214 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
216 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
220 /* Analyze a prologue, looking for a recognizable stack frame
221 and frame pointer. Scan until we encounter a store that could
222 clobber the stack frame unexpectedly, or an unknown instruction. */
225 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
226 CORE_ADDR start
, CORE_ADDR limit
,
227 struct aarch64_prologue_cache
*cache
,
228 abstract_instruction_reader
& reader
)
230 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
232 /* Track X registers and D registers in prologue. */
233 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
234 struct pv_area
*stack
;
235 struct cleanup
*back_to
;
237 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
238 regs
[i
] = pv_register (i
, 0);
239 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
240 back_to
= make_cleanup_free_pv_area (stack
);
242 for (; start
< limit
; start
+= 4)
247 insn
= reader
.read (start
, 4, byte_order_for_code
);
249 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
252 if (inst
.opcode
->iclass
== addsub_imm
253 && (inst
.opcode
->op
== OP_ADD
254 || strcmp ("sub", inst
.opcode
->name
) == 0))
256 unsigned rd
= inst
.operands
[0].reg
.regno
;
257 unsigned rn
= inst
.operands
[1].reg
.regno
;
259 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
260 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
261 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
262 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
264 if (inst
.opcode
->op
== OP_ADD
)
266 regs
[rd
] = pv_add_constant (regs
[rn
],
267 inst
.operands
[2].imm
.value
);
271 regs
[rd
] = pv_add_constant (regs
[rn
],
272 -inst
.operands
[2].imm
.value
);
275 else if (inst
.opcode
->iclass
== pcreladdr
276 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
278 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
279 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
281 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
283 else if (inst
.opcode
->iclass
== branch_imm
)
285 /* Stop analysis on branch. */
288 else if (inst
.opcode
->iclass
== condbranch
)
290 /* Stop analysis on branch. */
293 else if (inst
.opcode
->iclass
== branch_reg
)
295 /* Stop analysis on branch. */
298 else if (inst
.opcode
->iclass
== compbranch
)
300 /* Stop analysis on branch. */
303 else if (inst
.opcode
->op
== OP_MOVZ
)
305 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
306 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
308 else if (inst
.opcode
->iclass
== log_shift
309 && strcmp (inst
.opcode
->name
, "orr") == 0)
311 unsigned rd
= inst
.operands
[0].reg
.regno
;
312 unsigned rn
= inst
.operands
[1].reg
.regno
;
313 unsigned rm
= inst
.operands
[2].reg
.regno
;
315 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
316 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
317 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
319 if (inst
.operands
[2].shifter
.amount
== 0
320 && rn
== AARCH64_SP_REGNUM
)
326 debug_printf ("aarch64: prologue analysis gave up "
327 "addr=%s opcode=0x%x (orr x register)\n",
328 core_addr_to_string_nz (start
), insn
);
333 else if (inst
.opcode
->op
== OP_STUR
)
335 unsigned rt
= inst
.operands
[0].reg
.regno
;
336 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
338 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
340 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
341 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
342 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
343 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
345 pv_area_store (stack
, pv_add_constant (regs
[rn
],
346 inst
.operands
[1].addr
.offset
.imm
),
347 is64
? 8 : 4, regs
[rt
]);
349 else if ((inst
.opcode
->iclass
== ldstpair_off
350 || (inst
.opcode
->iclass
== ldstpair_indexed
351 && inst
.operands
[2].addr
.preind
))
352 && strcmp ("stp", inst
.opcode
->name
) == 0)
354 /* STP with addressing mode Pre-indexed and Base register. */
357 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
358 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
360 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
361 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
362 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
363 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
364 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
365 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
367 /* If recording this store would invalidate the store area
368 (perhaps because rn is not known) then we should abandon
369 further prologue analysis. */
370 if (pv_area_store_would_trash (stack
,
371 pv_add_constant (regs
[rn
], imm
)))
374 if (pv_area_store_would_trash (stack
,
375 pv_add_constant (regs
[rn
], imm
+ 8)))
378 rt1
= inst
.operands
[0].reg
.regno
;
379 rt2
= inst
.operands
[1].reg
.regno
;
380 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
382 /* Only bottom 64-bit of each V register (D register) need
384 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
385 rt1
+= AARCH64_X_REGISTER_COUNT
;
386 rt2
+= AARCH64_X_REGISTER_COUNT
;
389 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
391 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
394 if (inst
.operands
[2].addr
.writeback
)
395 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
398 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
399 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
400 && (inst
.opcode
->op
== OP_STR_POS
401 || inst
.opcode
->op
== OP_STRF_POS
)))
402 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
403 && strcmp ("str", inst
.opcode
->name
) == 0)
405 /* STR (immediate) */
406 unsigned int rt
= inst
.operands
[0].reg
.regno
;
407 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
408 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
410 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
411 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
412 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
414 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
416 /* Only bottom 64-bit of each V register (D register) need
418 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
419 rt
+= AARCH64_X_REGISTER_COUNT
;
422 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
),
423 is64
? 8 : 4, regs
[rt
]);
424 if (inst
.operands
[1].addr
.writeback
)
425 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
427 else if (inst
.opcode
->iclass
== testbranch
)
429 /* Stop analysis on branch. */
436 debug_printf ("aarch64: prologue analysis gave up addr=%s"
438 core_addr_to_string_nz (start
), insn
);
446 do_cleanups (back_to
);
450 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
452 /* Frame pointer is fp. Frame size is constant. */
453 cache
->framereg
= AARCH64_FP_REGNUM
;
454 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
456 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
458 /* Try the stack pointer. */
459 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
460 cache
->framereg
= AARCH64_SP_REGNUM
;
464 /* We're just out of luck. We don't know where the frame is. */
465 cache
->framereg
= -1;
466 cache
->framesize
= 0;
469 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
473 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
474 cache
->saved_regs
[i
].addr
= offset
;
477 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
479 int regnum
= gdbarch_num_regs (gdbarch
);
482 if (pv_area_find_reg (stack
, gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
484 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
487 do_cleanups (back_to
);
492 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
493 CORE_ADDR start
, CORE_ADDR limit
,
494 struct aarch64_prologue_cache
*cache
)
496 instruction_reader reader
;
498 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
627 /* Implement the "skip_prologue" gdbarch method. */
630 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
632 CORE_ADDR func_addr
, limit_pc
;
634 /* See if we can determine the end of the prologue via the symbol
635 table. If so, then return either PC, or the PC after the
636 prologue, whichever is greater. */
637 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
639 CORE_ADDR post_prologue_pc
640 = skip_prologue_using_sal (gdbarch
, func_addr
);
642 if (post_prologue_pc
!= 0)
643 return std::max (pc
, post_prologue_pc
);
646 /* Can't determine prologue from the symbol table, need to examine
649 /* Find an upper limit on the function prologue using the debug
650 information. If the debug information could not be used to
651 provide that bound, then use an arbitrary large number as the
653 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
655 limit_pc
= pc
+ 128; /* Magic. */
657 /* Try disassembling prologue. */
658 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
661 /* Scan the function prologue for THIS_FRAME and populate the prologue
665 aarch64_scan_prologue (struct frame_info
*this_frame
,
666 struct aarch64_prologue_cache
*cache
)
668 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
669 CORE_ADDR prologue_start
;
670 CORE_ADDR prologue_end
;
671 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
672 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
674 cache
->prev_pc
= prev_pc
;
676 /* Assume we do not find a frame. */
677 cache
->framereg
= -1;
678 cache
->framesize
= 0;
680 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
683 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
687 /* No line info so use the current PC. */
688 prologue_end
= prev_pc
;
690 else if (sal
.end
< prologue_end
)
692 /* The next line begins after the function end. */
693 prologue_end
= sal
.end
;
696 prologue_end
= std::min (prologue_end
, prev_pc
);
697 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
703 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
707 cache
->framereg
= AARCH64_FP_REGNUM
;
708 cache
->framesize
= 16;
709 cache
->saved_regs
[29].addr
= 0;
710 cache
->saved_regs
[30].addr
= 8;
714 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
715 function may throw an exception if the inferior's registers or memory is
719 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
720 struct aarch64_prologue_cache
*cache
)
722 CORE_ADDR unwound_fp
;
725 aarch64_scan_prologue (this_frame
, cache
);
727 if (cache
->framereg
== -1)
730 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
734 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
736 /* Calculate actual addresses of saved registers using offsets
737 determined by aarch64_analyze_prologue. */
738 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
739 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
740 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
742 cache
->func
= get_frame_func (this_frame
);
744 cache
->available_p
= 1;
747 /* Allocate and fill in *THIS_CACHE with information about the prologue of
748 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
749 Return a pointer to the current aarch64_prologue_cache in
752 static struct aarch64_prologue_cache
*
753 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
755 struct aarch64_prologue_cache
*cache
;
757 if (*this_cache
!= NULL
)
758 return (struct aarch64_prologue_cache
*) *this_cache
;
760 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
761 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
766 aarch64_make_prologue_cache_1 (this_frame
, cache
);
768 CATCH (ex
, RETURN_MASK_ERROR
)
770 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
771 throw_exception (ex
);
778 /* Implement the "stop_reason" frame_unwind method. */
780 static enum unwind_stop_reason
781 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
784 struct aarch64_prologue_cache
*cache
785 = aarch64_make_prologue_cache (this_frame
, this_cache
);
787 if (!cache
->available_p
)
788 return UNWIND_UNAVAILABLE
;
790 /* Halt the backtrace at "_start". */
791 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
792 return UNWIND_OUTERMOST
;
794 /* We've hit a wall, stop. */
795 if (cache
->prev_sp
== 0)
796 return UNWIND_OUTERMOST
;
798 return UNWIND_NO_REASON
;
801 /* Our frame ID for a normal frame is the current function's starting
802 PC and the caller's SP when we were called. */
805 aarch64_prologue_this_id (struct frame_info
*this_frame
,
806 void **this_cache
, struct frame_id
*this_id
)
808 struct aarch64_prologue_cache
*cache
809 = aarch64_make_prologue_cache (this_frame
, this_cache
);
811 if (!cache
->available_p
)
812 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
814 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
817 /* Implement the "prev_register" frame_unwind method. */
819 static struct value
*
820 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
821 void **this_cache
, int prev_regnum
)
823 struct aarch64_prologue_cache
*cache
824 = aarch64_make_prologue_cache (this_frame
, this_cache
);
826 /* If we are asked to unwind the PC, then we need to return the LR
827 instead. The prologue may save PC, but it will point into this
828 frame's prologue, not the next frame's resume location. */
829 if (prev_regnum
== AARCH64_PC_REGNUM
)
833 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
834 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
837 /* SP is generally not saved to the stack, but this frame is
838 identified by the next frame's stack pointer at the time of the
839 call. The value was already reconstructed into PREV_SP. */
852 if (prev_regnum
== AARCH64_SP_REGNUM
)
853 return frame_unwind_got_constant (this_frame
, prev_regnum
,
856 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
860 /* AArch64 prologue unwinder. */
861 struct frame_unwind aarch64_prologue_unwind
=
864 aarch64_prologue_frame_unwind_stop_reason
,
865 aarch64_prologue_this_id
,
866 aarch64_prologue_prev_register
,
868 default_frame_sniffer
871 /* Allocate and fill in *THIS_CACHE with information about the prologue of
872 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
873 Return a pointer to the current aarch64_prologue_cache in
876 static struct aarch64_prologue_cache
*
877 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
879 struct aarch64_prologue_cache
*cache
;
881 if (*this_cache
!= NULL
)
882 return (struct aarch64_prologue_cache
*) *this_cache
;
884 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
885 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
890 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
892 cache
->prev_pc
= get_frame_pc (this_frame
);
893 cache
->available_p
= 1;
895 CATCH (ex
, RETURN_MASK_ERROR
)
897 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
898 throw_exception (ex
);
905 /* Implement the "stop_reason" frame_unwind method. */
907 static enum unwind_stop_reason
908 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
911 struct aarch64_prologue_cache
*cache
912 = aarch64_make_stub_cache (this_frame
, this_cache
);
914 if (!cache
->available_p
)
915 return UNWIND_UNAVAILABLE
;
917 return UNWIND_NO_REASON
;
920 /* Our frame ID for a stub frame is the current SP and LR. */
923 aarch64_stub_this_id (struct frame_info
*this_frame
,
924 void **this_cache
, struct frame_id
*this_id
)
926 struct aarch64_prologue_cache
*cache
927 = aarch64_make_stub_cache (this_frame
, this_cache
);
929 if (cache
->available_p
)
930 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
932 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
935 /* Implement the "sniffer" frame_unwind method. */
938 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
939 struct frame_info
*this_frame
,
940 void **this_prologue_cache
)
942 CORE_ADDR addr_in_block
;
945 addr_in_block
= get_frame_address_in_block (this_frame
);
946 if (in_plt_section (addr_in_block
)
947 /* We also use the stub winder if the target memory is unreadable
948 to avoid having the prologue unwinder trying to read it. */
949 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
955 /* AArch64 stub unwinder. */
956 struct frame_unwind aarch64_stub_unwind
=
959 aarch64_stub_frame_unwind_stop_reason
,
960 aarch64_stub_this_id
,
961 aarch64_prologue_prev_register
,
963 aarch64_stub_unwind_sniffer
966 /* Return the frame base address of *THIS_FRAME. */
969 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
971 struct aarch64_prologue_cache
*cache
972 = aarch64_make_prologue_cache (this_frame
, this_cache
);
974 return cache
->prev_sp
- cache
->framesize
;
977 /* AArch64 default frame base information. */
978 struct frame_base aarch64_normal_base
=
980 &aarch64_prologue_unwind
,
981 aarch64_normal_frame_base
,
982 aarch64_normal_frame_base
,
983 aarch64_normal_frame_base
986 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
987 dummy frame. The frame ID's base needs to match the TOS value
988 saved by save_dummy_frame_tos () and returned from
989 aarch64_push_dummy_call, and the PC needs to match the dummy
990 frame's breakpoint. */
992 static struct frame_id
993 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
995 return frame_id_build (get_frame_register_unsigned (this_frame
,
997 get_frame_pc (this_frame
));
1000 /* Implement the "unwind_pc" gdbarch method. */
1003 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1006 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1011 /* Implement the "unwind_sp" gdbarch method. */
1014 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1016 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1019 /* Return the value of the REGNUM register in the previous frame of
1022 static struct value
*
1023 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1024 void **this_cache
, int regnum
)
1030 case AARCH64_PC_REGNUM
:
1031 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1032 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1035 internal_error (__FILE__
, __LINE__
,
1036 _("Unexpected register %d"), regnum
);
1040 /* Implement the "init_reg" dwarf2_frame_ops method. */
1043 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1044 struct dwarf2_frame_state_reg
*reg
,
1045 struct frame_info
*this_frame
)
1049 case AARCH64_PC_REGNUM
:
1050 reg
->how
= DWARF2_FRAME_REG_FN
;
1051 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1053 case AARCH64_SP_REGNUM
:
1054 reg
->how
= DWARF2_FRAME_REG_CFA
;
1059 /* When arguments must be pushed onto the stack, they go on in reverse
1060 order. The code below implements a FILO (stack) to do this. */
1064 /* Value to pass on stack. It can be NULL if this item is for stack
1066 const gdb_byte
*data
;
1068 /* Size in bytes of value to pass on stack. */
1072 DEF_VEC_O (stack_item_t
);
1074 /* Return the alignment (in bytes) of the given type. */
1077 aarch64_type_align (struct type
*t
)
1083 t
= check_typedef (t
);
1084 switch (TYPE_CODE (t
))
1087 /* Should never happen. */
1088 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1092 case TYPE_CODE_ENUM
:
1096 case TYPE_CODE_RANGE
:
1097 case TYPE_CODE_BITSTRING
:
1099 case TYPE_CODE_RVALUE_REF
:
1100 case TYPE_CODE_CHAR
:
1101 case TYPE_CODE_BOOL
:
1102 return TYPE_LENGTH (t
);
1104 case TYPE_CODE_ARRAY
:
1105 if (TYPE_VECTOR (t
))
1107 /* Use the natural alignment for vector types (the same for
1108 scalar type), but the maximum alignment is 128-bit. */
1109 if (TYPE_LENGTH (t
) > 16)
1112 return TYPE_LENGTH (t
);
1115 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1116 case TYPE_CODE_COMPLEX
:
1117 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1119 case TYPE_CODE_STRUCT
:
1120 case TYPE_CODE_UNION
:
1122 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1124 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1132 /* Return 1 if *TY is a homogeneous floating-point aggregate or
1133 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
1134 document; otherwise return 0. */
1137 is_hfa_or_hva (struct type
*ty
)
1139 switch (TYPE_CODE (ty
))
1141 case TYPE_CODE_ARRAY
:
1143 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1145 if (TYPE_VECTOR (ty
))
1148 if (TYPE_LENGTH (ty
) <= 4 /* HFA or HVA has at most 4 members. */
1149 && (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
/* HFA */
1150 || (TYPE_CODE (target_ty
) == TYPE_CODE_ARRAY
/* HVA */
1151 && TYPE_VECTOR (target_ty
))))
1156 case TYPE_CODE_UNION
:
1157 case TYPE_CODE_STRUCT
:
1159 /* HFA or HVA has at most four members. */
1160 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1162 struct type
*member0_type
;
1164 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1165 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
1166 || (TYPE_CODE (member0_type
) == TYPE_CODE_ARRAY
1167 && TYPE_VECTOR (member0_type
)))
1171 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1173 struct type
*member1_type
;
1175 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1176 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1177 || (TYPE_LENGTH (member0_type
)
1178 != TYPE_LENGTH (member1_type
)))
1194 /* AArch64 function call information structure. */
1195 struct aarch64_call_info
1197 /* the current argument number. */
1200 /* The next general purpose register number, equivalent to NGRN as
1201 described in the AArch64 Procedure Call Standard. */
1204 /* The next SIMD and floating point register number, equivalent to
1205 NSRN as described in the AArch64 Procedure Call Standard. */
1208 /* The next stacked argument address, equivalent to NSAA as
1209 described in the AArch64 Procedure Call Standard. */
1212 /* Stack item vector. */
1213 VEC(stack_item_t
) *si
;
1216 /* Pass a value in a sequence of consecutive X registers. The caller
1217 is responsbile for ensuring sufficient registers are available. */
1220 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1221 struct aarch64_call_info
*info
, struct type
*type
,
1224 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1225 int len
= TYPE_LENGTH (type
);
1226 enum type_code typecode
= TYPE_CODE (type
);
1227 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1228 const bfd_byte
*buf
= value_contents (arg
);
1234 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1235 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1239 /* Adjust sub-word struct/union args when big-endian. */
1240 if (byte_order
== BFD_ENDIAN_BIG
1241 && partial_len
< X_REGISTER_SIZE
1242 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1243 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1247 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1248 gdbarch_register_name (gdbarch
, regnum
),
1249 phex (regval
, X_REGISTER_SIZE
));
1251 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1258 /* Attempt to marshall a value in a V register. Return 1 if
1259 successful, or 0 if insufficient registers are available. This
1260 function, unlike the equivalent pass_in_x() function does not
1261 handle arguments spread across multiple registers. */
1264 pass_in_v (struct gdbarch
*gdbarch
,
1265 struct regcache
*regcache
,
1266 struct aarch64_call_info
*info
,
1267 int len
, const bfd_byte
*buf
)
1271 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1272 gdb_byte reg
[V_REGISTER_SIZE
];
1277 memset (reg
, 0, sizeof (reg
));
1278 /* PCS C.1, the argument is allocated to the least significant
1279 bits of V register. */
1280 memcpy (reg
, buf
, len
);
1281 regcache_cooked_write (regcache
, regnum
, reg
);
1285 debug_printf ("arg %d in %s\n", info
->argnum
,
1286 gdbarch_register_name (gdbarch
, regnum
));
1294 /* Marshall an argument onto the stack. */
1297 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1300 const bfd_byte
*buf
= value_contents (arg
);
1301 int len
= TYPE_LENGTH (type
);
1307 align
= aarch64_type_align (type
);
1309 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1310 Natural alignment of the argument's type. */
1311 align
= align_up (align
, 8);
1313 /* The AArch64 PCS requires at most doubleword alignment. */
1319 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1325 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1328 if (info
->nsaa
& (align
- 1))
1330 /* Push stack alignment padding. */
1331 int pad
= align
- (info
->nsaa
& (align
- 1));
1336 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1341 /* Marshall an argument into a sequence of one or more consecutive X
1342 registers or, if insufficient X registers are available then onto
1346 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1347 struct aarch64_call_info
*info
, struct type
*type
,
1350 int len
= TYPE_LENGTH (type
);
1351 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1353 /* PCS C.13 - Pass in registers if we have enough spare */
1354 if (info
->ngrn
+ nregs
<= 8)
1356 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1357 info
->ngrn
+= nregs
;
1362 pass_on_stack (info
, type
, arg
);
1366 /* Pass a value in a V register, or on the stack if insufficient are
1370 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1371 struct regcache
*regcache
,
1372 struct aarch64_call_info
*info
,
1376 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (type
),
1377 value_contents (arg
)))
1378 pass_on_stack (info
, type
, arg
);
1381 /* Implement the "push_dummy_call" gdbarch method. */
1384 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1385 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1387 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1388 CORE_ADDR struct_addr
)
1391 struct aarch64_call_info info
;
1392 struct type
*func_type
;
1393 struct type
*return_type
;
1394 int lang_struct_return
;
1396 memset (&info
, 0, sizeof (info
));
1398 /* We need to know what the type of the called function is in order
1399 to determine the number of named/anonymous arguments for the
1400 actual argument placement, and the return type in order to handle
1401 return value correctly.
1403 The generic code above us views the decision of return in memory
1404 or return in registers as a two stage processes. The language
1405 handler is consulted first and may decide to return in memory (eg
1406 class with copy constructor returned by value), this will cause
1407 the generic code to allocate space AND insert an initial leading
1410 If the language code does not decide to pass in memory then the
1411 target code is consulted.
1413 If the language code decides to pass in memory we want to move
1414 the pointer inserted as the initial argument from the argument
1415 list and into X8, the conventional AArch64 struct return pointer
1418 This is slightly awkward, ideally the flag "lang_struct_return"
1419 would be passed to the targets implementation of push_dummy_call.
1420 Rather that change the target interface we call the language code
1421 directly ourselves. */
1423 func_type
= check_typedef (value_type (function
));
1425 /* Dereference function pointer types. */
1426 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1427 func_type
= TYPE_TARGET_TYPE (func_type
);
1429 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1430 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1432 /* If language_pass_by_reference () returned true we will have been
1433 given an additional initial argument, a hidden pointer to the
1434 return slot in memory. */
1435 return_type
= TYPE_TARGET_TYPE (func_type
);
1436 lang_struct_return
= language_pass_by_reference (return_type
);
1438 /* Set the return address. For the AArch64, the return breakpoint
1439 is always at BP_ADDR. */
1440 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1442 /* If we were given an initial argument for the return slot because
1443 lang_struct_return was true, lose it. */
1444 if (lang_struct_return
)
1450 /* The struct_return pointer occupies X8. */
1451 if (struct_return
|| lang_struct_return
)
1455 debug_printf ("struct return in %s = 0x%s\n",
1456 gdbarch_register_name (gdbarch
,
1457 AARCH64_STRUCT_RETURN_REGNUM
),
1458 paddress (gdbarch
, struct_addr
));
1460 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1464 for (argnum
= 0; argnum
< nargs
; argnum
++)
1466 struct value
*arg
= args
[argnum
];
1467 struct type
*arg_type
;
1470 arg_type
= check_typedef (value_type (arg
));
1471 len
= TYPE_LENGTH (arg_type
);
1473 switch (TYPE_CODE (arg_type
))
1476 case TYPE_CODE_BOOL
:
1477 case TYPE_CODE_CHAR
:
1478 case TYPE_CODE_RANGE
:
1479 case TYPE_CODE_ENUM
:
1482 /* Promote to 32 bit integer. */
1483 if (TYPE_UNSIGNED (arg_type
))
1484 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1486 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1487 arg
= value_cast (arg_type
, arg
);
1489 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1492 case TYPE_CODE_COMPLEX
:
1495 const bfd_byte
*buf
= value_contents (arg
);
1496 struct type
*target_type
=
1497 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1499 pass_in_v (gdbarch
, regcache
, &info
,
1500 TYPE_LENGTH (target_type
), buf
);
1501 pass_in_v (gdbarch
, regcache
, &info
,
1502 TYPE_LENGTH (target_type
),
1503 buf
+ TYPE_LENGTH (target_type
));
1508 pass_on_stack (&info
, arg_type
, arg
);
1512 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1515 case TYPE_CODE_STRUCT
:
1516 case TYPE_CODE_ARRAY
:
1517 case TYPE_CODE_UNION
:
1518 if (is_hfa_or_hva (arg_type
))
1520 int elements
= TYPE_NFIELDS (arg_type
);
1522 /* Homogeneous Aggregates */
1523 if (info
.nsrn
+ elements
< 8)
1527 for (i
= 0; i
< elements
; i
++)
1529 /* We know that we have sufficient registers
1530 available therefore this will never fallback
1532 struct value
*field
=
1533 value_primitive_field (arg
, 0, i
, arg_type
);
1534 struct type
*field_type
=
1535 check_typedef (value_type (field
));
1537 pass_in_v_or_stack (gdbarch
, regcache
, &info
,
1544 pass_on_stack (&info
, arg_type
, arg
);
1547 else if (TYPE_CODE (arg_type
) == TYPE_CODE_ARRAY
1548 && TYPE_VECTOR (arg_type
) && (len
== 16 || len
== 8))
1550 /* Short vector types are passed in V registers. */
1551 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1555 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1556 invisible reference. */
1558 /* Allocate aligned storage. */
1559 sp
= align_down (sp
- len
, 16);
1561 /* Write the real data into the stack. */
1562 write_memory (sp
, value_contents (arg
), len
);
1564 /* Construct the indirection. */
1565 arg_type
= lookup_pointer_type (arg_type
);
1566 arg
= value_from_pointer (arg_type
, sp
);
1567 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1570 /* PCS C.15 / C.18 multiple values pass. */
1571 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1575 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1580 /* Make sure stack retains 16 byte alignment. */
1582 sp
-= 16 - (info
.nsaa
& 15);
1584 while (!VEC_empty (stack_item_t
, info
.si
))
1586 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1589 if (si
->data
!= NULL
)
1590 write_memory (sp
, si
->data
, si
->len
);
1591 VEC_pop (stack_item_t
, info
.si
);
1594 VEC_free (stack_item_t
, info
.si
);
1596 /* Finally, update the SP register. */
1597 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1602 /* Implement the "frame_align" gdbarch method. */
1605 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1607 /* Align the stack to sixteen bytes. */
1608 return sp
& ~(CORE_ADDR
) 15;
1611 /* Return the type for an AdvSISD Q register. */
1613 static struct type
*
1614 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1616 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1618 if (tdep
->vnq_type
== NULL
)
1623 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1626 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1627 append_composite_type_field (t
, "u", elem
);
1629 elem
= builtin_type (gdbarch
)->builtin_int128
;
1630 append_composite_type_field (t
, "s", elem
);
1635 return tdep
->vnq_type
;
1638 /* Return the type for an AdvSISD D register. */
1640 static struct type
*
1641 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1643 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1645 if (tdep
->vnd_type
== NULL
)
1650 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1653 elem
= builtin_type (gdbarch
)->builtin_double
;
1654 append_composite_type_field (t
, "f", elem
);
1656 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1657 append_composite_type_field (t
, "u", elem
);
1659 elem
= builtin_type (gdbarch
)->builtin_int64
;
1660 append_composite_type_field (t
, "s", elem
);
1665 return tdep
->vnd_type
;
1668 /* Return the type for an AdvSISD S register. */
1670 static struct type
*
1671 aarch64_vns_type (struct gdbarch
*gdbarch
)
1673 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1675 if (tdep
->vns_type
== NULL
)
1680 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1683 elem
= builtin_type (gdbarch
)->builtin_float
;
1684 append_composite_type_field (t
, "f", elem
);
1686 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1687 append_composite_type_field (t
, "u", elem
);
1689 elem
= builtin_type (gdbarch
)->builtin_int32
;
1690 append_composite_type_field (t
, "s", elem
);
1695 return tdep
->vns_type
;
1698 /* Return the type for an AdvSISD H register. */
1700 static struct type
*
1701 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1703 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1705 if (tdep
->vnh_type
== NULL
)
1710 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1713 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1714 append_composite_type_field (t
, "u", elem
);
1716 elem
= builtin_type (gdbarch
)->builtin_int16
;
1717 append_composite_type_field (t
, "s", elem
);
1722 return tdep
->vnh_type
;
1725 /* Return the type for an AdvSISD B register. */
1727 static struct type
*
1728 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1730 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1732 if (tdep
->vnb_type
== NULL
)
1737 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1740 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1741 append_composite_type_field (t
, "u", elem
);
1743 elem
= builtin_type (gdbarch
)->builtin_int8
;
1744 append_composite_type_field (t
, "s", elem
);
1749 return tdep
->vnb_type
;
1752 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1755 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1757 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1758 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1760 if (reg
== AARCH64_DWARF_SP
)
1761 return AARCH64_SP_REGNUM
;
1763 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1764 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1770 /* Implement the "print_insn" gdbarch method. */
1773 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1775 info
->symbols
= NULL
;
1776 return print_insn_aarch64 (memaddr
, info
);
1779 /* AArch64 BRK software debug mode instruction.
1780 Note that AArch64 code is always little-endian.
1781 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1782 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1784 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
1786 /* Extract from an array REGS containing the (raw) register state a
1787 function return value of type TYPE, and copy that, in virtual
1788 format, into VALBUF. */
1791 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1794 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1795 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1797 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1799 bfd_byte buf
[V_REGISTER_SIZE
];
1800 int len
= TYPE_LENGTH (type
);
1802 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1803 memcpy (valbuf
, buf
, len
);
1805 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1806 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1807 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1808 || TYPE_CODE (type
) == TYPE_CODE_PTR
1809 || TYPE_IS_REFERENCE (type
)
1810 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1812 /* If the the type is a plain integer, then the access is
1813 straight-forward. Otherwise we have to play around a bit
1815 int len
= TYPE_LENGTH (type
);
1816 int regno
= AARCH64_X0_REGNUM
;
1821 /* By using store_unsigned_integer we avoid having to do
1822 anything special for small big-endian values. */
1823 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1824 store_unsigned_integer (valbuf
,
1825 (len
> X_REGISTER_SIZE
1826 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1827 len
-= X_REGISTER_SIZE
;
1828 valbuf
+= X_REGISTER_SIZE
;
1831 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
1833 int regno
= AARCH64_V0_REGNUM
;
1834 bfd_byte buf
[V_REGISTER_SIZE
];
1835 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1836 int len
= TYPE_LENGTH (target_type
);
1838 regcache_cooked_read (regs
, regno
, buf
);
1839 memcpy (valbuf
, buf
, len
);
1841 regcache_cooked_read (regs
, regno
+ 1, buf
);
1842 memcpy (valbuf
, buf
, len
);
1845 else if (is_hfa_or_hva (type
))
1847 int elements
= TYPE_NFIELDS (type
);
1848 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1849 int len
= TYPE_LENGTH (member_type
);
1852 for (i
= 0; i
< elements
; i
++)
1854 int regno
= AARCH64_V0_REGNUM
+ i
;
1855 bfd_byte buf
[V_REGISTER_SIZE
];
1859 debug_printf ("read HFA or HVA return value element %d from %s\n",
1861 gdbarch_register_name (gdbarch
, regno
));
1863 regcache_cooked_read (regs
, regno
, buf
);
1865 memcpy (valbuf
, buf
, len
);
1869 else if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (type
)
1870 && (TYPE_LENGTH (type
) == 16 || TYPE_LENGTH (type
) == 8))
1872 /* Short vector is returned in V register. */
1873 gdb_byte buf
[V_REGISTER_SIZE
];
1875 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1876 memcpy (valbuf
, buf
, TYPE_LENGTH (type
));
1880 /* For a structure or union the behaviour is as if the value had
1881 been stored to word-aligned memory and then loaded into
1882 registers with 64-bit load instruction(s). */
1883 int len
= TYPE_LENGTH (type
);
1884 int regno
= AARCH64_X0_REGNUM
;
1885 bfd_byte buf
[X_REGISTER_SIZE
];
1889 regcache_cooked_read (regs
, regno
++, buf
);
1890 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1891 len
-= X_REGISTER_SIZE
;
1892 valbuf
+= X_REGISTER_SIZE
;
1898 /* Will a function return an aggregate type in memory or in a
1899 register? Return 0 if an aggregate type can be returned in a
1900 register, 1 if it must be returned in memory. */
1903 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
1905 type
= check_typedef (type
);
1907 if (is_hfa_or_hva (type
))
1909 /* v0-v7 are used to return values and one register is allocated
1910 for one member. However, HFA or HVA has at most four members. */
1914 if (TYPE_LENGTH (type
) > 16)
1916 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1917 invisible reference. */
1925 /* Write into appropriate registers a function return value of type
1926 TYPE, given in virtual format. */
1929 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
1930 const gdb_byte
*valbuf
)
1932 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1933 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1935 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1937 bfd_byte buf
[V_REGISTER_SIZE
];
1938 int len
= TYPE_LENGTH (type
);
1940 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
1941 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
1943 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1944 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1945 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1946 || TYPE_CODE (type
) == TYPE_CODE_PTR
1947 || TYPE_IS_REFERENCE (type
)
1948 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1950 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
1952 /* Values of one word or less are zero/sign-extended and
1954 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
1955 LONGEST val
= unpack_long (type
, valbuf
);
1957 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
1958 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
1962 /* Integral values greater than one word are stored in
1963 consecutive registers starting with r0. This will always
1964 be a multiple of the regiser size. */
1965 int len
= TYPE_LENGTH (type
);
1966 int regno
= AARCH64_X0_REGNUM
;
1970 regcache_cooked_write (regs
, regno
++, valbuf
);
1971 len
-= X_REGISTER_SIZE
;
1972 valbuf
+= X_REGISTER_SIZE
;
1976 else if (is_hfa_or_hva (type
))
1978 int elements
= TYPE_NFIELDS (type
);
1979 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1980 int len
= TYPE_LENGTH (member_type
);
1983 for (i
= 0; i
< elements
; i
++)
1985 int regno
= AARCH64_V0_REGNUM
+ i
;
1986 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
1990 debug_printf ("write HFA or HVA return value element %d to %s\n",
1992 gdbarch_register_name (gdbarch
, regno
));
1995 memcpy (tmpbuf
, valbuf
, len
);
1996 regcache_cooked_write (regs
, regno
, tmpbuf
);
2000 else if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (type
)
2001 && (TYPE_LENGTH (type
) == 8 || TYPE_LENGTH (type
) == 16))
2004 gdb_byte buf
[V_REGISTER_SIZE
];
2006 memcpy (buf
, valbuf
, TYPE_LENGTH (type
));
2007 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2011 /* For a structure or union the behaviour is as if the value had
2012 been stored to word-aligned memory and then loaded into
2013 registers with 64-bit load instruction(s). */
2014 int len
= TYPE_LENGTH (type
);
2015 int regno
= AARCH64_X0_REGNUM
;
2016 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2020 memcpy (tmpbuf
, valbuf
,
2021 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2022 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2023 len
-= X_REGISTER_SIZE
;
2024 valbuf
+= X_REGISTER_SIZE
;
2029 /* Implement the "return_value" gdbarch method. */
2031 static enum return_value_convention
2032 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2033 struct type
*valtype
, struct regcache
*regcache
,
2034 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2037 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2038 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2039 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2041 if (aarch64_return_in_memory (gdbarch
, valtype
))
2044 debug_printf ("return value in memory\n");
2045 return RETURN_VALUE_STRUCT_CONVENTION
;
2050 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2053 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2056 debug_printf ("return value in registers\n");
2058 return RETURN_VALUE_REGISTER_CONVENTION
;
2061 /* Implement the "get_longjmp_target" gdbarch method. */
2064 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2067 gdb_byte buf
[X_REGISTER_SIZE
];
2068 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2069 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2070 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2072 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2074 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2078 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2082 /* Implement the "gen_return_address" gdbarch method. */
2085 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2086 struct agent_expr
*ax
, struct axs_value
*value
,
2089 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2090 value
->kind
= axs_lvalue_register
;
2091 value
->u
.reg
= AARCH64_LR_REGNUM
;
2095 /* Return the pseudo register name corresponding to register regnum. */
2098 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2100 static const char *const q_name
[] =
2102 "q0", "q1", "q2", "q3",
2103 "q4", "q5", "q6", "q7",
2104 "q8", "q9", "q10", "q11",
2105 "q12", "q13", "q14", "q15",
2106 "q16", "q17", "q18", "q19",
2107 "q20", "q21", "q22", "q23",
2108 "q24", "q25", "q26", "q27",
2109 "q28", "q29", "q30", "q31",
2112 static const char *const d_name
[] =
2114 "d0", "d1", "d2", "d3",
2115 "d4", "d5", "d6", "d7",
2116 "d8", "d9", "d10", "d11",
2117 "d12", "d13", "d14", "d15",
2118 "d16", "d17", "d18", "d19",
2119 "d20", "d21", "d22", "d23",
2120 "d24", "d25", "d26", "d27",
2121 "d28", "d29", "d30", "d31",
2124 static const char *const s_name
[] =
2126 "s0", "s1", "s2", "s3",
2127 "s4", "s5", "s6", "s7",
2128 "s8", "s9", "s10", "s11",
2129 "s12", "s13", "s14", "s15",
2130 "s16", "s17", "s18", "s19",
2131 "s20", "s21", "s22", "s23",
2132 "s24", "s25", "s26", "s27",
2133 "s28", "s29", "s30", "s31",
2136 static const char *const h_name
[] =
2138 "h0", "h1", "h2", "h3",
2139 "h4", "h5", "h6", "h7",
2140 "h8", "h9", "h10", "h11",
2141 "h12", "h13", "h14", "h15",
2142 "h16", "h17", "h18", "h19",
2143 "h20", "h21", "h22", "h23",
2144 "h24", "h25", "h26", "h27",
2145 "h28", "h29", "h30", "h31",
2148 static const char *const b_name
[] =
2150 "b0", "b1", "b2", "b3",
2151 "b4", "b5", "b6", "b7",
2152 "b8", "b9", "b10", "b11",
2153 "b12", "b13", "b14", "b15",
2154 "b16", "b17", "b18", "b19",
2155 "b20", "b21", "b22", "b23",
2156 "b24", "b25", "b26", "b27",
2157 "b28", "b29", "b30", "b31",
2160 regnum
-= gdbarch_num_regs (gdbarch
);
2162 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2163 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2165 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2166 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2168 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2169 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2171 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2172 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2174 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2175 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2177 internal_error (__FILE__
, __LINE__
,
2178 _("aarch64_pseudo_register_name: bad register number %d"),
2182 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2184 static struct type
*
2185 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2187 regnum
-= gdbarch_num_regs (gdbarch
);
2189 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2190 return aarch64_vnq_type (gdbarch
);
2192 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2193 return aarch64_vnd_type (gdbarch
);
2195 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2196 return aarch64_vns_type (gdbarch
);
2198 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2199 return aarch64_vnh_type (gdbarch
);
2201 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2202 return aarch64_vnb_type (gdbarch
);
2204 internal_error (__FILE__
, __LINE__
,
2205 _("aarch64_pseudo_register_type: bad register number %d"),
2209 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2212 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2213 struct reggroup
*group
)
2215 regnum
-= gdbarch_num_regs (gdbarch
);
2217 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2218 return group
== all_reggroup
|| group
== vector_reggroup
;
2219 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2220 return (group
== all_reggroup
|| group
== vector_reggroup
2221 || group
== float_reggroup
);
2222 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2223 return (group
== all_reggroup
|| group
== vector_reggroup
2224 || group
== float_reggroup
);
2225 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2226 return group
== all_reggroup
|| group
== vector_reggroup
;
2227 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2228 return group
== all_reggroup
|| group
== vector_reggroup
;
2230 return group
== all_reggroup
;
2233 /* Implement the "pseudo_register_read_value" gdbarch method. */
2235 static struct value
*
2236 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2237 struct regcache
*regcache
,
2240 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2241 struct value
*result_value
;
2244 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2245 VALUE_LVAL (result_value
) = lval_register
;
2246 VALUE_REGNUM (result_value
) = regnum
;
2247 buf
= value_contents_raw (result_value
);
2249 regnum
-= gdbarch_num_regs (gdbarch
);
2251 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2253 enum register_status status
;
2256 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2257 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2258 if (status
!= REG_VALID
)
2259 mark_value_bytes_unavailable (result_value
, 0,
2260 TYPE_LENGTH (value_type (result_value
)));
2262 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2263 return result_value
;
2266 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2268 enum register_status status
;
2271 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2272 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2273 if (status
!= REG_VALID
)
2274 mark_value_bytes_unavailable (result_value
, 0,
2275 TYPE_LENGTH (value_type (result_value
)));
2277 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2278 return result_value
;
2281 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2283 enum register_status status
;
2286 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2287 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2288 if (status
!= REG_VALID
)
2289 mark_value_bytes_unavailable (result_value
, 0,
2290 TYPE_LENGTH (value_type (result_value
)));
2292 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2293 return result_value
;
2296 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2298 enum register_status status
;
2301 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2302 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2303 if (status
!= REG_VALID
)
2304 mark_value_bytes_unavailable (result_value
, 0,
2305 TYPE_LENGTH (value_type (result_value
)));
2307 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2308 return result_value
;
2311 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2313 enum register_status status
;
2316 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2317 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2318 if (status
!= REG_VALID
)
2319 mark_value_bytes_unavailable (result_value
, 0,
2320 TYPE_LENGTH (value_type (result_value
)));
2322 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2323 return result_value
;
2326 gdb_assert_not_reached ("regnum out of bound");
2329 /* Implement the "pseudo_register_write" gdbarch method. */
2332 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2333 int regnum
, const gdb_byte
*buf
)
2335 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2337 /* Ensure the register buffer is zero, we want gdb writes of the
2338 various 'scalar' pseudo registers to behavior like architectural
2339 writes, register width bytes are written the remainder are set to
2341 memset (reg_buf
, 0, sizeof (reg_buf
));
2343 regnum
-= gdbarch_num_regs (gdbarch
);
2345 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2347 /* pseudo Q registers */
2350 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2351 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2352 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2356 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2358 /* pseudo D registers */
2361 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2362 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2363 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2367 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2371 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2372 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2373 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2377 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2379 /* pseudo H registers */
2382 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2383 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2384 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2388 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2390 /* pseudo B registers */
2393 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2394 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2395 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2399 gdb_assert_not_reached ("regnum out of bound");
2402 /* Callback function for user_reg_add. */
2404 static struct value
*
2405 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2407 const int *reg_p
= (const int *) baton
;
2409 return value_of_register (*reg_p
, frame
);
2413 /* Implement the "software_single_step" gdbarch method, needed to
2414 single step through atomic sequences on AArch64. */
2416 static VEC (CORE_ADDR
) *
2417 aarch64_software_single_step (struct regcache
*regcache
)
2419 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2420 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2421 const int insn_size
= 4;
2422 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2423 CORE_ADDR pc
= regcache_read_pc (regcache
);
2424 CORE_ADDR breaks
[2] = { -1, -1 };
2426 CORE_ADDR closing_insn
= 0;
2427 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2428 byte_order_for_code
);
2431 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2432 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2434 VEC (CORE_ADDR
) *next_pcs
= NULL
;
2436 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2439 /* Look for a Load Exclusive instruction which begins the sequence. */
2440 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2443 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2446 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2447 byte_order_for_code
);
2449 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2451 /* Check if the instruction is a conditional branch. */
2452 if (inst
.opcode
->iclass
== condbranch
)
2454 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2456 if (bc_insn_count
>= 1)
2459 /* It is, so we'll try to set a breakpoint at the destination. */
2460 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2466 /* Look for the Store Exclusive which closes the atomic sequence. */
2467 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2474 /* We didn't find a closing Store Exclusive instruction, fall back. */
2478 /* Insert breakpoint after the end of the atomic sequence. */
2479 breaks
[0] = loc
+ insn_size
;
2481 /* Check for duplicated breakpoints, and also check that the second
2482 breakpoint is not within the atomic sequence. */
2484 && (breaks
[1] == breaks
[0]
2485 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2486 last_breakpoint
= 0;
2488 /* Insert the breakpoint at the end of the sequence, and one at the
2489 destination of the conditional branch, if it exists. */
2490 for (index
= 0; index
<= last_breakpoint
; index
++)
2491 VEC_safe_push (CORE_ADDR
, next_pcs
, breaks
[index
]);
2496 struct displaced_step_closure
2498 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2499 is being displaced stepping. */
2502 /* PC adjustment offset after displaced stepping. */
2506 /* Data when visiting instructions for displaced stepping. */
2508 struct aarch64_displaced_step_data
2510 struct aarch64_insn_data base
;
2512 /* The address where the instruction will be executed at. */
2514 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2515 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2516 /* Number of instructions in INSN_BUF. */
2517 unsigned insn_count
;
2518 /* Registers when doing displaced stepping. */
2519 struct regcache
*regs
;
2521 struct displaced_step_closure
*dsc
;
2524 /* Implementation of aarch64_insn_visitor method "b". */
2527 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2528 struct aarch64_insn_data
*data
)
2530 struct aarch64_displaced_step_data
*dsd
2531 = (struct aarch64_displaced_step_data
*) data
;
2532 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2534 if (can_encode_int32 (new_offset
, 28))
2536 /* Emit B rather than BL, because executing BL on a new address
2537 will get the wrong address into LR. In order to avoid this,
2538 we emit B, and update LR if the instruction is BL. */
2539 emit_b (dsd
->insn_buf
, 0, new_offset
);
2545 emit_nop (dsd
->insn_buf
);
2547 dsd
->dsc
->pc_adjust
= offset
;
2553 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2554 data
->insn_addr
+ 4);
2558 /* Implementation of aarch64_insn_visitor method "b_cond". */
2561 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2562 struct aarch64_insn_data
*data
)
2564 struct aarch64_displaced_step_data
*dsd
2565 = (struct aarch64_displaced_step_data
*) data
;
2567 /* GDB has to fix up PC after displaced step this instruction
2568 differently according to the condition is true or false. Instead
2569 of checking COND against conditional flags, we can use
2570 the following instructions, and GDB can tell how to fix up PC
2571 according to the PC value.
2573 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2579 emit_bcond (dsd
->insn_buf
, cond
, 8);
2581 dsd
->dsc
->pc_adjust
= offset
;
2582 dsd
->insn_count
= 1;
2585 /* Dynamically allocate a new register. If we know the register
2586 statically, we should make it a global as above instead of using this
2589 static struct aarch64_register
2590 aarch64_register (unsigned num
, int is64
)
2592 return (struct aarch64_register
) { num
, is64
};
2595 /* Implementation of aarch64_insn_visitor method "cb". */
2598 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2599 const unsigned rn
, int is64
,
2600 struct aarch64_insn_data
*data
)
2602 struct aarch64_displaced_step_data
*dsd
2603 = (struct aarch64_displaced_step_data
*) data
;
2605 /* The offset is out of range for a compare and branch
2606 instruction. We can use the following instructions instead:
2608 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2613 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2614 dsd
->insn_count
= 1;
2616 dsd
->dsc
->pc_adjust
= offset
;
2619 /* Implementation of aarch64_insn_visitor method "tb". */
2622 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2623 const unsigned rt
, unsigned bit
,
2624 struct aarch64_insn_data
*data
)
2626 struct aarch64_displaced_step_data
*dsd
2627 = (struct aarch64_displaced_step_data
*) data
;
2629 /* The offset is out of range for a test bit and branch
2630 instruction We can use the following instructions instead:
2632 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2638 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2639 dsd
->insn_count
= 1;
2641 dsd
->dsc
->pc_adjust
= offset
;
2644 /* Implementation of aarch64_insn_visitor method "adr". */
2647 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2648 const int is_adrp
, struct aarch64_insn_data
*data
)
2650 struct aarch64_displaced_step_data
*dsd
2651 = (struct aarch64_displaced_step_data
*) data
;
2652 /* We know exactly the address the ADR{P,} instruction will compute.
2653 We can just write it to the destination register. */
2654 CORE_ADDR address
= data
->insn_addr
+ offset
;
2658 /* Clear the lower 12 bits of the offset to get the 4K page. */
2659 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2663 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2666 dsd
->dsc
->pc_adjust
= 4;
2667 emit_nop (dsd
->insn_buf
);
2668 dsd
->insn_count
= 1;
2671 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2674 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2675 const unsigned rt
, const int is64
,
2676 struct aarch64_insn_data
*data
)
2678 struct aarch64_displaced_step_data
*dsd
2679 = (struct aarch64_displaced_step_data
*) data
;
2680 CORE_ADDR address
= data
->insn_addr
+ offset
;
2681 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2683 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2687 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2688 aarch64_register (rt
, 1), zero
);
2690 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2691 aarch64_register (rt
, 1), zero
);
2693 dsd
->dsc
->pc_adjust
= 4;
2696 /* Implementation of aarch64_insn_visitor method "others". */
2699 aarch64_displaced_step_others (const uint32_t insn
,
2700 struct aarch64_insn_data
*data
)
2702 struct aarch64_displaced_step_data
*dsd
2703 = (struct aarch64_displaced_step_data
*) data
;
2705 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2706 dsd
->insn_count
= 1;
2708 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2711 dsd
->dsc
->pc_adjust
= 0;
2714 dsd
->dsc
->pc_adjust
= 4;
2717 static const struct aarch64_insn_visitor visitor
=
2719 aarch64_displaced_step_b
,
2720 aarch64_displaced_step_b_cond
,
2721 aarch64_displaced_step_cb
,
2722 aarch64_displaced_step_tb
,
2723 aarch64_displaced_step_adr
,
2724 aarch64_displaced_step_ldr_literal
,
2725 aarch64_displaced_step_others
,
2728 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2730 struct displaced_step_closure
*
2731 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2732 CORE_ADDR from
, CORE_ADDR to
,
2733 struct regcache
*regs
)
2735 struct displaced_step_closure
*dsc
= NULL
;
2736 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2737 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2738 struct aarch64_displaced_step_data dsd
;
2741 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2744 /* Look for a Load Exclusive instruction which begins the sequence. */
2745 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2747 /* We can't displaced step atomic sequences. */
2751 dsc
= XCNEW (struct displaced_step_closure
);
2752 dsd
.base
.insn_addr
= from
;
2757 aarch64_relocate_instruction (insn
, &visitor
,
2758 (struct aarch64_insn_data
*) &dsd
);
2759 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2761 if (dsd
.insn_count
!= 0)
2765 /* Instruction can be relocated to scratch pad. Copy
2766 relocated instruction(s) there. */
2767 for (i
= 0; i
< dsd
.insn_count
; i
++)
2769 if (debug_displaced
)
2771 debug_printf ("displaced: writing insn ");
2772 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2773 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2775 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2776 (ULONGEST
) dsd
.insn_buf
[i
]);
2788 /* Implement the "displaced_step_fixup" gdbarch method. */
2791 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2792 struct displaced_step_closure
*dsc
,
2793 CORE_ADDR from
, CORE_ADDR to
,
2794 struct regcache
*regs
)
2800 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2803 /* Condition is true. */
2805 else if (pc
- to
== 4)
2807 /* Condition is false. */
2811 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2814 if (dsc
->pc_adjust
!= 0)
2816 if (debug_displaced
)
2818 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2819 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2821 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2822 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   AArch64 always hardware-single-steps over the relocated copy.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2835 /* Initialize the current architecture based on INFO. If possible,
2836 re-use an architecture from ARCHES, which is a list of
2837 architectures already created during this debugging session.
2839 Called e.g. at program startup, when reading a core file, and when
2840 reading a binary file. */
2842 static struct gdbarch
*
2843 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2845 struct gdbarch_tdep
*tdep
;
2846 struct gdbarch
*gdbarch
;
2847 struct gdbarch_list
*best_arch
;
2848 struct tdesc_arch_data
*tdesc_data
= NULL
;
2849 const struct target_desc
*tdesc
= info
.target_desc
;
2852 const struct tdesc_feature
*feature
;
2854 int num_pseudo_regs
= 0;
2856 /* Ensure we always have a target descriptor. */
2857 if (!tdesc_has_registers (tdesc
))
2858 tdesc
= tdesc_aarch64
;
2862 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2864 if (feature
== NULL
)
2867 tdesc_data
= tdesc_data_alloc ();
2869 /* Validate the descriptor provides the mandatory core R registers
2870 and allocate their numbers. */
2871 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2873 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2874 aarch64_r_register_names
[i
]);
2876 num_regs
= AARCH64_X0_REGNUM
+ i
;
2878 /* Look for the V registers. */
2879 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2882 /* Validate the descriptor provides the mandatory V registers
2883 and allocate their numbers. */
2884 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2886 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2887 aarch64_v_register_names
[i
]);
2889 num_regs
= AARCH64_V0_REGNUM
+ i
;
2891 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2892 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2893 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2894 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2895 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2900 tdesc_data_cleanup (tdesc_data
);
2904 /* AArch64 code is always little-endian. */
2905 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2907 /* If there is already a candidate, use it. */
2908 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2910 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2912 /* Found a match. */
2916 if (best_arch
!= NULL
)
2918 if (tdesc_data
!= NULL
)
2919 tdesc_data_cleanup (tdesc_data
);
2920 return best_arch
->gdbarch
;
2923 tdep
= XCNEW (struct gdbarch_tdep
);
2924 gdbarch
= gdbarch_alloc (&info
, tdep
);
2926 /* This should be low enough for everything. */
2927 tdep
->lowest_pc
= 0x20;
2928 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2929 tdep
->jb_elt_size
= 8;
2931 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2932 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2934 /* Frame handling. */
2935 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2936 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2937 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2939 /* Advance PC across function entry code. */
2940 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2942 /* The stack grows downward. */
2943 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2945 /* Breakpoint manipulation. */
2946 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
2947 aarch64_breakpoint::kind_from_pc
);
2948 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
2949 aarch64_breakpoint::bp_from_kind
);
2950 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2951 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2953 /* Information about registers, etc. */
2954 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2955 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2956 set_gdbarch_num_regs (gdbarch
, num_regs
);
2958 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2959 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2960 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2961 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2962 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2963 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2964 aarch64_pseudo_register_reggroup_p
);
2967 set_gdbarch_short_bit (gdbarch
, 16);
2968 set_gdbarch_int_bit (gdbarch
, 32);
2969 set_gdbarch_float_bit (gdbarch
, 32);
2970 set_gdbarch_double_bit (gdbarch
, 64);
2971 set_gdbarch_long_double_bit (gdbarch
, 128);
2972 set_gdbarch_long_bit (gdbarch
, 64);
2973 set_gdbarch_long_long_bit (gdbarch
, 64);
2974 set_gdbarch_ptr_bit (gdbarch
, 64);
2975 set_gdbarch_char_signed (gdbarch
, 0);
2976 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2977 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2978 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2980 /* Internal <-> external register number maps. */
2981 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2983 /* Returning results. */
2984 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2987 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2989 /* Virtual tables. */
2990 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2992 /* Hook in the ABI-specific overrides, if they have been registered. */
2993 info
.target_desc
= tdesc
;
2994 info
.tdep_info
= (void *) tdesc_data
;
2995 gdbarch_init_osabi (info
, gdbarch
);
2997 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2999 /* Add some default predicates. */
3000 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3001 dwarf2_append_unwinders (gdbarch
);
3002 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3004 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3006 /* Now we have tuned the configuration, set a few final things,
3007 based on what the OS ABI has told us. */
3009 if (tdep
->jb_pc
>= 0)
3010 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3012 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3014 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3016 /* Add standard register aliases. */
3017 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3018 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3019 value_of_aarch64_user_reg
,
3020 &aarch64_register_aliases
[i
].regnum
);
3026 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3028 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3033 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3034 paddress (gdbarch
, tdep
->lowest_pc
));
3037 /* Suppress warning from -Wmissing-prototypes. */
3038 extern initialize_file_ftype _initialize_aarch64_tdep
;
3041 _initialize_aarch64_tdep (void)
3043 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3046 initialize_tdesc_aarch64 ();
3048 /* Debug this file's internals. */
3049 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3050 Set AArch64 debugging."), _("\
3051 Show AArch64 debugging."), _("\
3052 When on, AArch64 specific debugging is enabled."),
3055 &setdebuglist
, &showdebuglist
);
3058 register_self_test (selftests::aarch64_analyze_prologue_test
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS and copy LENGTH register numbers from RECORD_BUF into
   it; no-op when LENGTH is zero.  do/while(0) keeps the macro safe as
   a single statement.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS and copy LENGTH length/address pairs from RECORD_BUF
   into it; no-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
          while (0)
3089 /* AArch64 record/replay structures and enumerations. */
3091 struct aarch64_mem_r
3093 uint64_t len
; /* Record length. */
3094 uint64_t addr
; /* Memory address. */
3097 enum aarch64_record_result
3099 AARCH64_RECORD_SUCCESS
,
3100 AARCH64_RECORD_FAILURE
,
3101 AARCH64_RECORD_UNSUPPORTED
,
3102 AARCH64_RECORD_UNKNOWN
3105 typedef struct insn_decode_record_t
3107 struct gdbarch
*gdbarch
;
3108 struct regcache
*regcache
;
3109 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3110 uint32_t aarch64_insn
; /* Insn to be recorded. */
3111 uint32_t mem_rec_count
; /* Count of memory records. */
3112 uint32_t reg_rec_count
; /* Count of register records. */
3113 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3114 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3115 } insn_decode_record
;
3117 /* Record handler for data processing - register instructions. */
3120 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3122 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3123 uint32_t record_buf
[4];
3125 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3126 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3127 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3129 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3133 /* Logical (shifted register). */
3134 if (insn_bits24_27
== 0x0a)
3135 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3137 else if (insn_bits24_27
== 0x0b)
3138 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3140 return AARCH64_RECORD_UNKNOWN
;
3142 record_buf
[0] = reg_rd
;
3143 aarch64_insn_r
->reg_rec_count
= 1;
3145 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3149 if (insn_bits24_27
== 0x0b)
3151 /* Data-processing (3 source). */
3152 record_buf
[0] = reg_rd
;
3153 aarch64_insn_r
->reg_rec_count
= 1;
3155 else if (insn_bits24_27
== 0x0a)
3157 if (insn_bits21_23
== 0x00)
3159 /* Add/subtract (with carry). */
3160 record_buf
[0] = reg_rd
;
3161 aarch64_insn_r
->reg_rec_count
= 1;
3162 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3164 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3165 aarch64_insn_r
->reg_rec_count
= 2;
3168 else if (insn_bits21_23
== 0x02)
3170 /* Conditional compare (register) and conditional compare
3171 (immediate) instructions. */
3172 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3173 aarch64_insn_r
->reg_rec_count
= 1;
3175 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3177 /* CConditional select. */
3178 /* Data-processing (2 source). */
3179 /* Data-processing (1 source). */
3180 record_buf
[0] = reg_rd
;
3181 aarch64_insn_r
->reg_rec_count
= 1;
3184 return AARCH64_RECORD_UNKNOWN
;
3188 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3190 return AARCH64_RECORD_SUCCESS
;
3193 /* Record handler for data processing - immediate instructions. */
3196 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3198 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3199 uint32_t record_buf
[4];
3201 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3202 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3203 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3205 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3206 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3207 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3209 record_buf
[0] = reg_rd
;
3210 aarch64_insn_r
->reg_rec_count
= 1;
3212 else if (insn_bits24_27
== 0x01)
3214 /* Add/Subtract (immediate). */
3215 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3216 record_buf
[0] = reg_rd
;
3217 aarch64_insn_r
->reg_rec_count
= 1;
3219 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3221 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3223 /* Logical (immediate). */
3224 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3225 record_buf
[0] = reg_rd
;
3226 aarch64_insn_r
->reg_rec_count
= 1;
3228 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3231 return AARCH64_RECORD_UNKNOWN
;
3233 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3235 return AARCH64_RECORD_SUCCESS
;
3238 /* Record handler for branch, exception generation and system instructions. */
3241 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3243 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3244 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3245 uint32_t record_buf
[4];
3247 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3248 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3249 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3251 if (insn_bits28_31
== 0x0d)
3253 /* Exception generation instructions. */
3254 if (insn_bits24_27
== 0x04)
3256 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3257 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3258 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3260 ULONGEST svc_number
;
3262 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3264 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3268 return AARCH64_RECORD_UNSUPPORTED
;
3270 /* System instructions. */
3271 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3273 uint32_t reg_rt
, reg_crn
;
3275 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3276 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3278 /* Record rt in case of sysl and mrs instructions. */
3279 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3281 record_buf
[0] = reg_rt
;
3282 aarch64_insn_r
->reg_rec_count
= 1;
3284 /* Record cpsr for hint and msr(immediate) instructions. */
3285 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3287 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3288 aarch64_insn_r
->reg_rec_count
= 1;
3291 /* Unconditional branch (register). */
3292 else if((insn_bits24_27
& 0x0e) == 0x06)
3294 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3295 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3296 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3299 return AARCH64_RECORD_UNKNOWN
;
3301 /* Unconditional branch (immediate). */
3302 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3304 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3305 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3306 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3309 /* Compare & branch (immediate), Test & branch (immediate) and
3310 Conditional branch (immediate). */
3311 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3313 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3315 return AARCH64_RECORD_SUCCESS
;
3318 /* Record handler for advanced SIMD load and store instructions. */
3321 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3324 uint64_t addr_offset
= 0;
3325 uint32_t record_buf
[24];
3326 uint64_t record_buf_mem
[24];
3327 uint32_t reg_rn
, reg_rt
;
3328 uint32_t reg_index
= 0, mem_index
= 0;
3329 uint8_t opcode_bits
, size_bits
;
3331 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3332 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3333 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3334 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3335 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3338 debug_printf ("Process record: Advanced SIMD load/store\n");
3340 /* Load/store single structure. */
3341 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3343 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3344 scale
= opcode_bits
>> 2;
3345 selem
= ((opcode_bits
& 0x02) |
3346 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3350 if (size_bits
& 0x01)
3351 return AARCH64_RECORD_UNKNOWN
;
3354 if ((size_bits
>> 1) & 0x01)
3355 return AARCH64_RECORD_UNKNOWN
;
3356 if (size_bits
& 0x01)
3358 if (!((opcode_bits
>> 1) & 0x01))
3361 return AARCH64_RECORD_UNKNOWN
;
3365 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3372 return AARCH64_RECORD_UNKNOWN
;
3378 for (sindex
= 0; sindex
< selem
; sindex
++)
3380 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3381 reg_rt
= (reg_rt
+ 1) % 32;
3385 for (sindex
= 0; sindex
< selem
; sindex
++)
3387 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3388 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3391 record_buf_mem
[mem_index
++] = esize
/ 8;
3392 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3394 addr_offset
= addr_offset
+ (esize
/ 8);
3395 reg_rt
= (reg_rt
+ 1) % 32;
3399 /* Load/store multiple structure. */
3402 uint8_t selem
, esize
, rpt
, elements
;
3403 uint8_t eindex
, rindex
;
3405 esize
= 8 << size_bits
;
3406 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3407 elements
= 128 / esize
;
3409 elements
= 64 / esize
;
3411 switch (opcode_bits
)
3413 /*LD/ST4 (4 Registers). */
3418 /*LD/ST1 (4 Registers). */
3423 /*LD/ST3 (3 Registers). */
3428 /*LD/ST1 (3 Registers). */
3433 /*LD/ST1 (1 Register). */
3438 /*LD/ST2 (2 Registers). */
3443 /*LD/ST1 (2 Registers). */
3449 return AARCH64_RECORD_UNSUPPORTED
;
3452 for (rindex
= 0; rindex
< rpt
; rindex
++)
3453 for (eindex
= 0; eindex
< elements
; eindex
++)
3455 uint8_t reg_tt
, sindex
;
3456 reg_tt
= (reg_rt
+ rindex
) % 32;
3457 for (sindex
= 0; sindex
< selem
; sindex
++)
3459 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3460 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3463 record_buf_mem
[mem_index
++] = esize
/ 8;
3464 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3466 addr_offset
= addr_offset
+ (esize
/ 8);
3467 reg_tt
= (reg_tt
+ 1) % 32;
3472 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3473 record_buf
[reg_index
++] = reg_rn
;
3475 aarch64_insn_r
->reg_rec_count
= reg_index
;
3476 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3477 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3479 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3481 return AARCH64_RECORD_SUCCESS
;
3484 /* Record handler for load and store instructions. */
3487 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3489 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3490 uint8_t insn_bit23
, insn_bit21
;
3491 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3492 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3493 uint64_t datasize
, offset
;
3494 uint32_t record_buf
[8];
3495 uint64_t record_buf_mem
[8];
3498 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3499 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3500 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3501 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3502 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3503 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3504 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3505 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3506 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3507 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3508 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3510 /* Load/store exclusive. */
3511 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3514 debug_printf ("Process record: load/store exclusive\n");
3518 record_buf
[0] = reg_rt
;
3519 aarch64_insn_r
->reg_rec_count
= 1;
3522 record_buf
[1] = reg_rt2
;
3523 aarch64_insn_r
->reg_rec_count
= 2;
3529 datasize
= (8 << size_bits
) * 2;
3531 datasize
= (8 << size_bits
);
3532 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3534 record_buf_mem
[0] = datasize
/ 8;
3535 record_buf_mem
[1] = address
;
3536 aarch64_insn_r
->mem_rec_count
= 1;
3539 /* Save register rs. */
3540 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3541 aarch64_insn_r
->reg_rec_count
= 1;
3545 /* Load register (literal) instructions decoding. */
3546 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3549 debug_printf ("Process record: load register (literal)\n");
3551 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3553 record_buf
[0] = reg_rt
;
3554 aarch64_insn_r
->reg_rec_count
= 1;
3556 /* All types of load/store pair instructions decoding. */
3557 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3560 debug_printf ("Process record: load/store pair\n");
3566 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3567 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3571 record_buf
[0] = reg_rt
;
3572 record_buf
[1] = reg_rt2
;
3574 aarch64_insn_r
->reg_rec_count
= 2;
3579 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3581 size_bits
= size_bits
>> 1;
3582 datasize
= 8 << (2 + size_bits
);
3583 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3584 offset
= offset
<< (2 + size_bits
);
3585 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3587 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3589 if (imm7_off
& 0x40)
3590 address
= address
- offset
;
3592 address
= address
+ offset
;
3595 record_buf_mem
[0] = datasize
/ 8;
3596 record_buf_mem
[1] = address
;
3597 record_buf_mem
[2] = datasize
/ 8;
3598 record_buf_mem
[3] = address
+ (datasize
/ 8);
3599 aarch64_insn_r
->mem_rec_count
= 2;
3601 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3602 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3604 /* Load/store register (unsigned immediate) instructions. */
3605 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3607 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3614 if (size_bits
!= 0x03)
3617 return AARCH64_RECORD_UNKNOWN
;
3621 debug_printf ("Process record: load/store (unsigned immediate):"
3622 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3628 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3629 datasize
= 8 << size_bits
;
3630 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3632 offset
= offset
<< size_bits
;
3633 address
= address
+ offset
;
3635 record_buf_mem
[0] = datasize
>> 3;
3636 record_buf_mem
[1] = address
;
3637 aarch64_insn_r
->mem_rec_count
= 1;
3642 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3644 record_buf
[0] = reg_rt
;
3645 aarch64_insn_r
->reg_rec_count
= 1;
3648 /* Load/store register (register offset) instructions. */
3649 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3650 && insn_bits10_11
== 0x02 && insn_bit21
)
3653 debug_printf ("Process record: load/store (register offset)\n");
3654 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3661 if (size_bits
!= 0x03)
3664 return AARCH64_RECORD_UNKNOWN
;
3668 ULONGEST reg_rm_val
;
3670 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3671 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3672 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3673 offset
= reg_rm_val
<< size_bits
;
3675 offset
= reg_rm_val
;
3676 datasize
= 8 << size_bits
;
3677 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3679 address
= address
+ offset
;
3680 record_buf_mem
[0] = datasize
>> 3;
3681 record_buf_mem
[1] = address
;
3682 aarch64_insn_r
->mem_rec_count
= 1;
3687 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3689 record_buf
[0] = reg_rt
;
3690 aarch64_insn_r
->reg_rec_count
= 1;
3693 /* Load/store register (immediate and unprivileged) instructions. */
3694 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3699 debug_printf ("Process record: load/store "
3700 "(immediate and unprivileged)\n");
3702 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3709 if (size_bits
!= 0x03)
3712 return AARCH64_RECORD_UNKNOWN
;
3717 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3718 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3719 datasize
= 8 << size_bits
;
3720 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3722 if (insn_bits10_11
!= 0x01)
3724 if (imm9_off
& 0x0100)
3725 address
= address
- offset
;
3727 address
= address
+ offset
;
3729 record_buf_mem
[0] = datasize
>> 3;
3730 record_buf_mem
[1] = address
;
3731 aarch64_insn_r
->mem_rec_count
= 1;
3736 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3738 record_buf
[0] = reg_rt
;
3739 aarch64_insn_r
->reg_rec_count
= 1;
3741 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3742 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3744 /* Advanced SIMD load/store instructions. */
3746 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3748 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3750 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3752 return AARCH64_RECORD_SUCCESS
;
3755 /* Record handler for data processing SIMD and floating point instructions. */
3758 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3760 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3761 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3762 uint8_t insn_bits11_14
;
3763 uint32_t record_buf
[2];
3765 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3766 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3767 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3768 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3769 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3770 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3771 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3772 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3773 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3776 debug_printf ("Process record: data processing SIMD/FP: ");
3778 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3780 /* Floating point - fixed point conversion instructions. */
3784 debug_printf ("FP - fixed point conversion");
3786 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3787 record_buf
[0] = reg_rd
;
3789 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3791 /* Floating point - conditional compare instructions. */
3792 else if (insn_bits10_11
== 0x01)
3795 debug_printf ("FP - conditional compare");
3797 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3799 /* Floating point - data processing (2-source) and
3800 conditional select instructions. */
3801 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3804 debug_printf ("FP - DP (2-source)");
3806 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3808 else if (insn_bits10_11
== 0x00)
3810 /* Floating point - immediate instructions. */
3811 if ((insn_bits12_15
& 0x01) == 0x01
3812 || (insn_bits12_15
& 0x07) == 0x04)
3815 debug_printf ("FP - immediate");
3816 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3818 /* Floating point - compare instructions. */
3819 else if ((insn_bits12_15
& 0x03) == 0x02)
3822 debug_printf ("FP - immediate");
3823 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3825 /* Floating point - integer conversions instructions. */
3826 else if (insn_bits12_15
== 0x00)
3828 /* Convert float to integer instruction. */
3829 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3832 debug_printf ("float to int conversion");
3834 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3836 /* Convert integer to float instruction. */
3837 else if ((opcode
>> 1) == 0x01 && !rmode
)
3840 debug_printf ("int to float conversion");
3842 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3844 /* Move float to integer instruction. */
3845 else if ((opcode
>> 1) == 0x03)
3848 debug_printf ("move float to int");
3850 if (!(opcode
& 0x01))
3851 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3853 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3856 return AARCH64_RECORD_UNKNOWN
;
3859 return AARCH64_RECORD_UNKNOWN
;
3862 return AARCH64_RECORD_UNKNOWN
;
3864 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3867 debug_printf ("SIMD copy");
3869 /* Advanced SIMD copy instructions. */
3870 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3871 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3872 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3874 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3875 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3877 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3880 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3882 /* All remaining floating point or advanced SIMD instructions. */
3886 debug_printf ("all remain");
3888 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3892 debug_printf ("\n");
3894 aarch64_insn_r
->reg_rec_count
++;
3895 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3896 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3898 return AARCH64_RECORD_SUCCESS
;
3901 /* Decodes insns type and invokes its record handler. */
3904 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3906 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3908 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3909 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3910 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3911 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3913 /* Data processing - immediate instructions. */
3914 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3915 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3917 /* Branch, exception generation and system instructions. */
3918 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3919 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3921 /* Load and store instructions. */
3922 if (!ins_bit25
&& ins_bit27
)
3923 return aarch64_record_load_store (aarch64_insn_r
);
3925 /* Data processing - register instructions. */
3926 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3927 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3929 /* Data processing - SIMD and floating point instructions. */
3930 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3931 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3933 return AARCH64_RECORD_UNSUPPORTED
;
3936 /* Cleans up local record registers and memory allocations. */
3939 deallocate_reg_mem (insn_decode_record
*record
)
3941 xfree (record
->aarch64_regs
);
3942 xfree (record
->aarch64_mems
);
3945 /* Parse the current instruction and record the values of the registers and
3946 memory that will be changed in current instruction to record_arch_list
3947 return -1 if something is wrong. */
3950 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3951 CORE_ADDR insn_addr
)
3953 uint32_t rec_no
= 0;
3954 uint8_t insn_size
= 4;
3956 gdb_byte buf
[insn_size
];
3957 insn_decode_record aarch64_record
;
3959 memset (&buf
[0], 0, insn_size
);
3960 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3961 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3962 aarch64_record
.aarch64_insn
3963 = (uint32_t) extract_unsigned_integer (&buf
[0],
3965 gdbarch_byte_order (gdbarch
));
3966 aarch64_record
.regcache
= regcache
;
3967 aarch64_record
.this_addr
= insn_addr
;
3968 aarch64_record
.gdbarch
= gdbarch
;
3970 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3971 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3973 printf_unfiltered (_("Process record does not support instruction "
3974 "0x%0x at address %s.\n"),
3975 aarch64_record
.aarch64_insn
,
3976 paddress (gdbarch
, insn_addr
));
3982 /* Record registers. */
3983 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3985 /* Always record register CPSR. */
3986 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3987 AARCH64_CPSR_REGNUM
);
3988 if (aarch64_record
.aarch64_regs
)
3989 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3990 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3991 aarch64_record
.aarch64_regs
[rec_no
]))
3994 /* Record memories. */
3995 if (aarch64_record
.aarch64_mems
)
3996 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3997 if (record_full_arch_list_add_mem
3998 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3999 aarch64_record
.aarch64_mems
[rec_no
].len
))
4002 if (record_full_arch_list_add_end ())
4006 deallocate_reg_mem (&aarch64_record
);