1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
/* Pseudo register base numbers.  Each bank of 32 pseudo registers
   (Q, D, S, H, B views of the V registers) follows the previous one.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
67 /* The standard register names, and all the valid aliases for them. */
70 const char *const name
;
72 } aarch64_register_aliases
[] =
74 /* 64-bit register names. */
75 {"fp", AARCH64_FP_REGNUM
},
76 {"lr", AARCH64_LR_REGNUM
},
77 {"sp", AARCH64_SP_REGNUM
},
79 /* 32-bit register names. */
80 {"w0", AARCH64_X0_REGNUM
+ 0},
81 {"w1", AARCH64_X0_REGNUM
+ 1},
82 {"w2", AARCH64_X0_REGNUM
+ 2},
83 {"w3", AARCH64_X0_REGNUM
+ 3},
84 {"w4", AARCH64_X0_REGNUM
+ 4},
85 {"w5", AARCH64_X0_REGNUM
+ 5},
86 {"w6", AARCH64_X0_REGNUM
+ 6},
87 {"w7", AARCH64_X0_REGNUM
+ 7},
88 {"w8", AARCH64_X0_REGNUM
+ 8},
89 {"w9", AARCH64_X0_REGNUM
+ 9},
90 {"w10", AARCH64_X0_REGNUM
+ 10},
91 {"w11", AARCH64_X0_REGNUM
+ 11},
92 {"w12", AARCH64_X0_REGNUM
+ 12},
93 {"w13", AARCH64_X0_REGNUM
+ 13},
94 {"w14", AARCH64_X0_REGNUM
+ 14},
95 {"w15", AARCH64_X0_REGNUM
+ 15},
96 {"w16", AARCH64_X0_REGNUM
+ 16},
97 {"w17", AARCH64_X0_REGNUM
+ 17},
98 {"w18", AARCH64_X0_REGNUM
+ 18},
99 {"w19", AARCH64_X0_REGNUM
+ 19},
100 {"w20", AARCH64_X0_REGNUM
+ 20},
101 {"w21", AARCH64_X0_REGNUM
+ 21},
102 {"w22", AARCH64_X0_REGNUM
+ 22},
103 {"w23", AARCH64_X0_REGNUM
+ 23},
104 {"w24", AARCH64_X0_REGNUM
+ 24},
105 {"w25", AARCH64_X0_REGNUM
+ 25},
106 {"w26", AARCH64_X0_REGNUM
+ 26},
107 {"w27", AARCH64_X0_REGNUM
+ 27},
108 {"w28", AARCH64_X0_REGNUM
+ 28},
109 {"w29", AARCH64_X0_REGNUM
+ 29},
110 {"w30", AARCH64_X0_REGNUM
+ 30},
113 {"ip0", AARCH64_X0_REGNUM
+ 16},
114 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  /* NOTE(review): the extracted source ends at "sp"; upstream GDB also
     lists "pc" and "cpsr" here to cover all core raw registers.  */
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  /* NOTE(review): the extracted source ends at "v31"; upstream GDB also
     lists the FP status/control registers here.  */
  "fpsr",
  "fpcr"
};
150 /* AArch64 prologue cache structure. */
151 struct aarch64_prologue_cache
153 /* The program counter at the start of the function. It is used to
154 identify this frame as a prologue frame. */
157 /* The program counter at the time this frame was created; i.e. where
158 this function was called from. It is used to identify this frame as a
162 /* The stack pointer at the time this frame was created; i.e. the
163 caller's stack pointer when this function was called. It is used
164 to identify this frame. */
167 /* Is the target available to read from? */
170 /* The frame base for this frame is just prev_sp - frame size.
171 FRAMESIZE is the distance from the frame pointer to the
172 initial stack pointer. */
175 /* The register used to hold the frame pointer for this frame. */
178 /* Saved register offsets. */
179 struct trad_frame_saved_reg
*saved_regs
;
/* Toggle this file's internal debugging dump.  Controlled by the
   "set debug aarch64" command; zero (off) by default.  */
static int aarch64_debug;
/* Print the current setting of the aarch64 debug flag ("show debug
   aarch64" callback).  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Shift the field to the top of the word, then arithmetic-shift it
     back down so the field's top bit is sign-extended.  (Relies on the
     implementation-defined but universal arithmetic right shift of
     negative signed values.)  */
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  return ((int32_t) insn << shift_l) >> shift_r;
}
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
225 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
227 ADDR specifies the address of the opcode.
228 INSN specifies the opcode to test.
229 RD receives the 'rd' field from the decoded instruction.
230 RN receives the 'rn' field from the decoded instruction.
232 Return 1 if the opcodes matches and is decoded, otherwise 0. */
234 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
237 if ((insn
& 0x9f000000) == 0x91000000)
242 *rd
= (insn
>> 0) & 0x1f;
243 *rn
= (insn
>> 5) & 0x1f;
244 *imm
= (insn
>> 10) & 0xfff;
245 shift
= (insn
>> 22) & 0x3;
246 op_is_sub
= (insn
>> 30) & 0x1;
265 debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
266 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
274 /* Decode an opcode if it represents an ADRP instruction.
276 ADDR specifies the address of the opcode.
277 INSN specifies the opcode to test.
278 RD receives the 'rd' field from the decoded instruction.
280 Return 1 if the opcodes matches and is decoded, otherwise 0. */
283 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
285 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
287 *rd
= (insn
>> 0) & 0x1f;
291 debug_printf ("decode: 0x%s 0x%x adrp x%u, #?\n",
292 core_addr_to_string_nz (addr
), insn
, *rd
);
299 /* Decode an opcode if it represents an branch immediate or branch
300 and link immediate instruction.
302 ADDR specifies the address of the opcode.
303 INSN specifies the opcode to test.
304 IS_BL receives the 'op' bit from the decoded instruction.
305 OFFSET receives the immediate offset from the decoded instruction.
307 Return 1 if the opcodes matches and is decoded, otherwise 0. */
310 decode_b (CORE_ADDR addr
, uint32_t insn
, int *is_bl
, int32_t *offset
)
312 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
313 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
314 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
316 *is_bl
= (insn
>> 31) & 0x1;
317 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
321 debug_printf ("decode: 0x%s 0x%x %s 0x%s\n",
322 core_addr_to_string_nz (addr
), insn
,
324 core_addr_to_string_nz (addr
+ *offset
));
332 /* Decode an opcode if it represents a conditional branch instruction.
334 ADDR specifies the address of the opcode.
335 INSN specifies the opcode to test.
336 COND receives the branch condition field from the decoded
338 OFFSET receives the immediate offset from the decoded instruction.
340 Return 1 if the opcodes matches and is decoded, otherwise 0. */
343 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
345 /* b.cond 0101 0100 iiii iiii iiii iiii iii0 cccc */
346 if (decode_masked_match (insn
, 0xff000010, 0x54000000))
348 *cond
= (insn
>> 0) & 0xf;
349 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
353 debug_printf ("decode: 0x%s 0x%x b<%u> 0x%s\n",
354 core_addr_to_string_nz (addr
), insn
, *cond
,
355 core_addr_to_string_nz (addr
+ *offset
));
362 /* Decode an opcode if it represents a branch via register instruction.
364 ADDR specifies the address of the opcode.
365 INSN specifies the opcode to test.
366 IS_BLR receives the 'op' bit from the decoded instruction.
367 RN receives the 'rn' field from the decoded instruction.
369 Return 1 if the opcodes matches and is decoded, otherwise 0. */
372 decode_br (CORE_ADDR addr
, uint32_t insn
, int *is_blr
, unsigned *rn
)
374 /* 8 4 0 6 2 8 4 0 */
375 /* blr 110101100011111100000000000rrrrr */
376 /* br 110101100001111100000000000rrrrr */
377 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
379 *is_blr
= (insn
>> 21) & 1;
380 *rn
= (insn
>> 5) & 0x1f;
384 debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
385 core_addr_to_string_nz (addr
), insn
,
386 *is_blr
? "blr" : "br", *rn
);
394 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
396 ADDR specifies the address of the opcode.
397 INSN specifies the opcode to test.
398 IS64 receives the 'sf' field from the decoded instruction.
399 IS_CBNZ receives the 'op' field from the decoded instruction.
400 RN receives the 'rn' field from the decoded instruction.
401 OFFSET receives the 'imm19' field from the decoded instruction.
403 Return 1 if the opcodes matches and is decoded, otherwise 0. */
406 decode_cb (CORE_ADDR addr
, uint32_t insn
, int *is64
, int *is_cbnz
,
407 unsigned *rn
, int32_t *offset
)
409 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
410 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
411 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
413 *rn
= (insn
>> 0) & 0x1f;
414 *is64
= (insn
>> 31) & 0x1;
415 *is_cbnz
= (insn
>> 24) & 0x1;
416 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
420 debug_printf ("decode: 0x%s 0x%x %s 0x%s\n",
421 core_addr_to_string_nz (addr
), insn
,
422 *is_cbnz
? "cbnz" : "cbz",
423 core_addr_to_string_nz (addr
+ *offset
));
430 /* Decode an opcode if it represents a ERET instruction.
432 ADDR specifies the address of the opcode.
433 INSN specifies the opcode to test.
435 Return 1 if the opcodes matches and is decoded, otherwise 0. */
438 decode_eret (CORE_ADDR addr
, uint32_t insn
)
440 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
441 if (insn
== 0xd69f03e0)
445 debug_printf ("decode: 0x%s 0x%x eret\n",
446 core_addr_to_string_nz (addr
), insn
);
453 /* Decode an opcode if it represents a MOVZ instruction.
455 ADDR specifies the address of the opcode.
456 INSN specifies the opcode to test.
457 RD receives the 'rd' field from the decoded instruction.
459 Return 1 if the opcodes matches and is decoded, otherwise 0. */
462 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
464 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
466 *rd
= (insn
>> 0) & 0x1f;
470 debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
471 core_addr_to_string_nz (addr
), insn
, *rd
);
478 /* Decode an opcode if it represents a ORR (shifted register)
481 ADDR specifies the address of the opcode.
482 INSN specifies the opcode to test.
483 RD receives the 'rd' field from the decoded instruction.
484 RN receives the 'rn' field from the decoded instruction.
485 RM receives the 'rm' field from the decoded instruction.
486 IMM receives the 'imm6' field from the decoded instruction.
488 Return 1 if the opcodes matches and is decoded, otherwise 0. */
491 decode_orr_shifted_register_x (CORE_ADDR addr
,
492 uint32_t insn
, unsigned *rd
, unsigned *rn
,
493 unsigned *rm
, int32_t *imm
)
495 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
497 *rd
= (insn
>> 0) & 0x1f;
498 *rn
= (insn
>> 5) & 0x1f;
499 *rm
= (insn
>> 16) & 0x1f;
500 *imm
= (insn
>> 10) & 0x3f;
504 debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
505 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
513 /* Decode an opcode if it represents a RET instruction.
515 ADDR specifies the address of the opcode.
516 INSN specifies the opcode to test.
517 RN receives the 'rn' field from the decoded instruction.
519 Return 1 if the opcodes matches and is decoded, otherwise 0. */
522 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
524 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
526 *rn
= (insn
>> 5) & 0x1f;
529 debug_printf ("decode: 0x%s 0x%x ret x%u\n",
530 core_addr_to_string_nz (addr
), insn
, *rn
);
537 /* Decode an opcode if it represents the following instruction:
538 STP rt, rt2, [rn, #imm]
540 ADDR specifies the address of the opcode.
541 INSN specifies the opcode to test.
542 RT1 receives the 'rt' field from the decoded instruction.
543 RT2 receives the 'rt2' field from the decoded instruction.
544 RN receives the 'rn' field from the decoded instruction.
545 IMM receives the 'imm' field from the decoded instruction.
547 Return 1 if the opcodes matches and is decoded, otherwise 0. */
550 decode_stp_offset (CORE_ADDR addr
,
552 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
554 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
556 *rt1
= (insn
>> 0) & 0x1f;
557 *rn
= (insn
>> 5) & 0x1f;
558 *rt2
= (insn
>> 10) & 0x1f;
559 *imm
= extract_signed_bitfield (insn
, 7, 15);
564 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
565 core_addr_to_string_nz (addr
), insn
, *rt1
, *rt2
,
573 /* Decode an opcode if it represents the following instruction:
574 STP rt, rt2, [rn, #imm]!
576 ADDR specifies the address of the opcode.
577 INSN specifies the opcode to test.
578 RT1 receives the 'rt' field from the decoded instruction.
579 RT2 receives the 'rt2' field from the decoded instruction.
580 RN receives the 'rn' field from the decoded instruction.
581 IMM receives the 'imm' field from the decoded instruction.
583 Return 1 if the opcodes matches and is decoded, otherwise 0. */
586 decode_stp_offset_wb (CORE_ADDR addr
,
588 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
591 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
593 *rt1
= (insn
>> 0) & 0x1f;
594 *rn
= (insn
>> 5) & 0x1f;
595 *rt2
= (insn
>> 10) & 0x1f;
596 *imm
= extract_signed_bitfield (insn
, 7, 15);
601 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
602 core_addr_to_string_nz (addr
), insn
, *rt1
, *rt2
,
610 /* Decode an opcode if it represents the following instruction:
613 ADDR specifies the address of the opcode.
614 INSN specifies the opcode to test.
615 IS64 receives size field from the decoded instruction.
616 RT receives the 'rt' field from the decoded instruction.
617 RN receives the 'rn' field from the decoded instruction.
618 IMM receives the 'imm' field from the decoded instruction.
620 Return 1 if the opcodes matches and is decoded, otherwise 0. */
623 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
624 unsigned *rn
, int32_t *imm
)
626 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
628 *is64
= (insn
>> 30) & 1;
629 *rt
= (insn
>> 0) & 0x1f;
630 *rn
= (insn
>> 5) & 0x1f;
631 *imm
= extract_signed_bitfield (insn
, 9, 12);
635 debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
636 core_addr_to_string_nz (addr
), insn
,
637 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
644 /* Decode an opcode if it represents a TBZ or TBNZ instruction.
646 ADDR specifies the address of the opcode.
647 INSN specifies the opcode to test.
648 IS_TBNZ receives the 'op' field from the decoded instruction.
649 BIT receives the bit position field from the decoded instruction.
650 RT receives 'rt' field from the decoded instruction.
651 IMM receives 'imm' field from the decoded instruction.
653 Return 1 if the opcodes matches and is decoded, otherwise 0. */
656 decode_tb (CORE_ADDR addr
, uint32_t insn
, int *is_tbnz
, unsigned *bit
,
657 unsigned *rt
, int32_t *imm
)
659 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
660 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
661 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
663 *rt
= (insn
>> 0) & 0x1f;
664 *is_tbnz
= (insn
>> 24) & 0x1;
665 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
666 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
670 debug_printf ("decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
671 core_addr_to_string_nz (addr
), insn
,
672 *is_tbnz
? "tbnz" : "tbz", *rt
, *bit
,
673 core_addr_to_string_nz (addr
+ *imm
));
680 /* Analyze a prologue, looking for a recognizable stack frame
681 and frame pointer. Scan until we encounter a store that could
682 clobber the stack frame unexpectedly, or an unknown instruction. */
685 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
686 CORE_ADDR start
, CORE_ADDR limit
,
687 struct aarch64_prologue_cache
*cache
)
689 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
691 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
692 struct pv_area
*stack
;
693 struct cleanup
*back_to
;
695 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
696 regs
[i
] = pv_register (i
, 0);
697 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
698 back_to
= make_cleanup_free_pv_area (stack
);
700 for (; start
< limit
; start
+= 4)
719 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
721 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
722 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
723 else if (decode_adrp (start
, insn
, &rd
))
724 regs
[rd
] = pv_unknown ();
725 else if (decode_b (start
, insn
, &is_link
, &offset
))
727 /* Stop analysis on branch. */
730 else if (decode_bcond (start
, insn
, &cond
, &offset
))
732 /* Stop analysis on branch. */
735 else if (decode_br (start
, insn
, &is_link
, &rn
))
737 /* Stop analysis on branch. */
740 else if (decode_cb (start
, insn
, &is64
, &is_cbnz
, &rn
, &offset
))
742 /* Stop analysis on branch. */
745 else if (decode_eret (start
, insn
))
747 /* Stop analysis on branch. */
750 else if (decode_movz (start
, insn
, &rd
))
751 regs
[rd
] = pv_unknown ();
753 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
755 if (imm
== 0 && rn
== 31)
761 debug_printf ("aarch64: prologue analysis gave up "
762 "addr=0x%s opcode=0x%x (orr x register)\n",
763 core_addr_to_string_nz (start
), insn
);
768 else if (decode_ret (start
, insn
, &rn
))
770 /* Stop analysis on branch. */
773 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
775 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
776 is64
? 8 : 4, regs
[rt
]);
778 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
780 /* If recording this store would invalidate the store area
781 (perhaps because rn is not known) then we should abandon
782 further prologue analysis. */
783 if (pv_area_store_would_trash (stack
,
784 pv_add_constant (regs
[rn
], imm
)))
787 if (pv_area_store_would_trash (stack
,
788 pv_add_constant (regs
[rn
], imm
+ 8)))
791 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
793 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
796 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
798 /* If recording this store would invalidate the store area
799 (perhaps because rn is not known) then we should abandon
800 further prologue analysis. */
801 if (pv_area_store_would_trash (stack
,
802 pv_add_constant (regs
[rn
], imm
)))
805 if (pv_area_store_would_trash (stack
,
806 pv_add_constant (regs
[rn
], imm
+ 8)))
809 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
811 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
813 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
815 else if (decode_tb (start
, insn
, &is_tbnz
, &bit
, &rn
, &offset
))
817 /* Stop analysis on branch. */
824 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
826 core_addr_to_string_nz (start
), insn
);
834 do_cleanups (back_to
);
838 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
840 /* Frame pointer is fp. Frame size is constant. */
841 cache
->framereg
= AARCH64_FP_REGNUM
;
842 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
844 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
846 /* Try the stack pointer. */
847 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
848 cache
->framereg
= AARCH64_SP_REGNUM
;
852 /* We're just out of luck. We don't know where the frame is. */
853 cache
->framereg
= -1;
854 cache
->framesize
= 0;
857 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
861 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
862 cache
->saved_regs
[i
].addr
= offset
;
865 do_cleanups (back_to
);
869 /* Implement the "skip_prologue" gdbarch method. */
872 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
876 CORE_ADDR func_addr
, limit_pc
;
877 struct symtab_and_line sal
;
879 /* See if we can determine the end of the prologue via the symbol
880 table. If so, then return either PC, or the PC after the
881 prologue, whichever is greater. */
882 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
884 CORE_ADDR post_prologue_pc
885 = skip_prologue_using_sal (gdbarch
, func_addr
);
887 if (post_prologue_pc
!= 0)
888 return max (pc
, post_prologue_pc
);
891 /* Can't determine prologue from the symbol table, need to examine
894 /* Find an upper limit on the function prologue using the debug
895 information. If the debug information could not be used to
896 provide that bound, then use an arbitrary large number as the
898 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
900 limit_pc
= pc
+ 128; /* Magic. */
902 /* Try disassembling prologue. */
903 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
906 /* Scan the function prologue for THIS_FRAME and populate the prologue
910 aarch64_scan_prologue (struct frame_info
*this_frame
,
911 struct aarch64_prologue_cache
*cache
)
913 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
914 CORE_ADDR prologue_start
;
915 CORE_ADDR prologue_end
;
916 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
917 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
919 cache
->prev_pc
= prev_pc
;
921 /* Assume we do not find a frame. */
922 cache
->framereg
= -1;
923 cache
->framesize
= 0;
925 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
928 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
932 /* No line info so use the current PC. */
933 prologue_end
= prev_pc
;
935 else if (sal
.end
< prologue_end
)
937 /* The next line begins after the function end. */
938 prologue_end
= sal
.end
;
941 prologue_end
= min (prologue_end
, prev_pc
);
942 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
949 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
951 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
955 cache
->framereg
= AARCH64_FP_REGNUM
;
956 cache
->framesize
= 16;
957 cache
->saved_regs
[29].addr
= 0;
958 cache
->saved_regs
[30].addr
= 8;
962 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
963 function may throw an exception if the inferior's registers or memory is
967 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
968 struct aarch64_prologue_cache
*cache
)
970 CORE_ADDR unwound_fp
;
973 aarch64_scan_prologue (this_frame
, cache
);
975 if (cache
->framereg
== -1)
978 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
982 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
984 /* Calculate actual addresses of saved registers using offsets
985 determined by aarch64_analyze_prologue. */
986 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
987 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
988 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
990 cache
->func
= get_frame_func (this_frame
);
992 cache
->available_p
= 1;
995 /* Allocate and fill in *THIS_CACHE with information about the prologue of
996 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
997 Return a pointer to the current aarch64_prologue_cache in
1000 static struct aarch64_prologue_cache
*
1001 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
1003 struct aarch64_prologue_cache
*cache
;
1005 if (*this_cache
!= NULL
)
1008 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1009 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1010 *this_cache
= cache
;
1014 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1016 CATCH (ex
, RETURN_MASK_ERROR
)
1018 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1019 throw_exception (ex
);
1026 /* Implement the "stop_reason" frame_unwind method. */
1028 static enum unwind_stop_reason
1029 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1032 struct aarch64_prologue_cache
*cache
1033 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1035 if (!cache
->available_p
)
1036 return UNWIND_UNAVAILABLE
;
1038 /* Halt the backtrace at "_start". */
1039 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1040 return UNWIND_OUTERMOST
;
1042 /* We've hit a wall, stop. */
1043 if (cache
->prev_sp
== 0)
1044 return UNWIND_OUTERMOST
;
1046 return UNWIND_NO_REASON
;
1049 /* Our frame ID for a normal frame is the current function's starting
1050 PC and the caller's SP when we were called. */
1053 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1054 void **this_cache
, struct frame_id
*this_id
)
1056 struct aarch64_prologue_cache
*cache
1057 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1059 if (!cache
->available_p
)
1060 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1062 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1065 /* Implement the "prev_register" frame_unwind method. */
1067 static struct value
*
1068 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1069 void **this_cache
, int prev_regnum
)
1071 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1072 struct aarch64_prologue_cache
*cache
1073 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1075 /* If we are asked to unwind the PC, then we need to return the LR
1076 instead. The prologue may save PC, but it will point into this
1077 frame's prologue, not the next frame's resume location. */
1078 if (prev_regnum
== AARCH64_PC_REGNUM
)
1082 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1083 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1086 /* SP is generally not saved to the stack, but this frame is
1087 identified by the next frame's stack pointer at the time of the
1088 call. The value was already reconstructed into PREV_SP. */
1094 | | | <- Previous SP
1097 +--| saved fp |<- FP
1101 if (prev_regnum
== AARCH64_SP_REGNUM
)
1102 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1105 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1109 /* AArch64 prologue unwinder. */
1110 struct frame_unwind aarch64_prologue_unwind
=
1113 aarch64_prologue_frame_unwind_stop_reason
,
1114 aarch64_prologue_this_id
,
1115 aarch64_prologue_prev_register
,
1117 default_frame_sniffer
1120 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1121 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1122 Return a pointer to the current aarch64_prologue_cache in
1125 static struct aarch64_prologue_cache
*
1126 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1128 struct aarch64_prologue_cache
*cache
;
1130 if (*this_cache
!= NULL
)
1133 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1134 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1135 *this_cache
= cache
;
1139 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1141 cache
->prev_pc
= get_frame_pc (this_frame
);
1142 cache
->available_p
= 1;
1144 CATCH (ex
, RETURN_MASK_ERROR
)
1146 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1147 throw_exception (ex
);
1154 /* Implement the "stop_reason" frame_unwind method. */
1156 static enum unwind_stop_reason
1157 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1160 struct aarch64_prologue_cache
*cache
1161 = aarch64_make_stub_cache (this_frame
, this_cache
);
1163 if (!cache
->available_p
)
1164 return UNWIND_UNAVAILABLE
;
1166 return UNWIND_NO_REASON
;
1169 /* Our frame ID for a stub frame is the current SP and LR. */
1172 aarch64_stub_this_id (struct frame_info
*this_frame
,
1173 void **this_cache
, struct frame_id
*this_id
)
1175 struct aarch64_prologue_cache
*cache
1176 = aarch64_make_stub_cache (this_frame
, this_cache
);
1178 if (cache
->available_p
)
1179 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1181 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1184 /* Implement the "sniffer" frame_unwind method. */
1187 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1188 struct frame_info
*this_frame
,
1189 void **this_prologue_cache
)
1191 CORE_ADDR addr_in_block
;
1194 addr_in_block
= get_frame_address_in_block (this_frame
);
1195 if (in_plt_section (addr_in_block
)
1196 /* We also use the stub winder if the target memory is unreadable
1197 to avoid having the prologue unwinder trying to read it. */
1198 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1204 /* AArch64 stub unwinder. */
1205 struct frame_unwind aarch64_stub_unwind
=
1208 aarch64_stub_frame_unwind_stop_reason
,
1209 aarch64_stub_this_id
,
1210 aarch64_prologue_prev_register
,
1212 aarch64_stub_unwind_sniffer
1215 /* Return the frame base address of *THIS_FRAME. */
1218 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1220 struct aarch64_prologue_cache
*cache
1221 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1223 return cache
->prev_sp
- cache
->framesize
;
1226 /* AArch64 default frame base information. */
1227 struct frame_base aarch64_normal_base
=
1229 &aarch64_prologue_unwind
,
1230 aarch64_normal_frame_base
,
1231 aarch64_normal_frame_base
,
1232 aarch64_normal_frame_base
1235 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1236 dummy frame. The frame ID's base needs to match the TOS value
1237 saved by save_dummy_frame_tos () and returned from
1238 aarch64_push_dummy_call, and the PC needs to match the dummy
1239 frame's breakpoint. */
1241 static struct frame_id
1242 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1244 return frame_id_build (get_frame_register_unsigned (this_frame
,
1246 get_frame_pc (this_frame
));
1249 /* Implement the "unwind_pc" gdbarch method. */
1252 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1255 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1260 /* Implement the "unwind_sp" gdbarch method. */
1263 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1265 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1268 /* Return the value of the REGNUM register in the previous frame of
1271 static struct value
*
1272 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1273 void **this_cache
, int regnum
)
1275 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1280 case AARCH64_PC_REGNUM
:
1281 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1282 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1285 internal_error (__FILE__
, __LINE__
,
1286 _("Unexpected register %d"), regnum
);
1290 /* Implement the "init_reg" dwarf2_frame_ops method. */
1293 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1294 struct dwarf2_frame_state_reg
*reg
,
1295 struct frame_info
*this_frame
)
1299 case AARCH64_PC_REGNUM
:
1300 reg
->how
= DWARF2_FRAME_REG_FN
;
1301 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1303 case AARCH64_SP_REGNUM
:
1304 reg
->how
= DWARF2_FRAME_REG_CFA
;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1323 /* Return the alignment (in bytes) of the given type. */
1326 aarch64_type_align (struct type
*t
)
1332 t
= check_typedef (t
);
1333 switch (TYPE_CODE (t
))
1336 /* Should never happen. */
1337 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1341 case TYPE_CODE_ENUM
:
1345 case TYPE_CODE_RANGE
:
1346 case TYPE_CODE_BITSTRING
:
1348 case TYPE_CODE_CHAR
:
1349 case TYPE_CODE_BOOL
:
1350 return TYPE_LENGTH (t
);
1352 case TYPE_CODE_ARRAY
:
1353 case TYPE_CODE_COMPLEX
:
1354 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1356 case TYPE_CODE_STRUCT
:
1357 case TYPE_CODE_UNION
:
1359 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1361 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1369 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1370 defined in the AAPCS64 ABI document; otherwise return 0. */
1373 is_hfa (struct type
*ty
)
1375 switch (TYPE_CODE (ty
))
1377 case TYPE_CODE_ARRAY
:
1379 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1380 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1385 case TYPE_CODE_UNION
:
1386 case TYPE_CODE_STRUCT
:
1388 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1390 struct type
*member0_type
;
1392 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1393 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1397 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1399 struct type
*member1_type
;
1401 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1402 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1403 || (TYPE_LENGTH (member0_type
)
1404 != TYPE_LENGTH (member1_type
)))
1420 /* AArch64 function call information structure. */
1421 struct aarch64_call_info
1423 /* the current argument number. */
1426 /* The next general purpose register number, equivalent to NGRN as
1427 described in the AArch64 Procedure Call Standard. */
1430 /* The next SIMD and floating point register number, equivalent to
1431 NSRN as described in the AArch64 Procedure Call Standard. */
1434 /* The next stacked argument address, equivalent to NSAA as
1435 described in the AArch64 Procedure Call Standard. */
1438 /* Stack item vector. */
1439 VEC(stack_item_t
) *si
;
1442 /* Pass a value in a sequence of consecutive X registers. The caller
1443 is responsbile for ensuring sufficient registers are available. */
1446 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1447 struct aarch64_call_info
*info
, struct type
*type
,
1448 const bfd_byte
*buf
)
1450 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1451 int len
= TYPE_LENGTH (type
);
1452 enum type_code typecode
= TYPE_CODE (type
);
1453 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1459 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1460 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1464 /* Adjust sub-word struct/union args when big-endian. */
1465 if (byte_order
== BFD_ENDIAN_BIG
1466 && partial_len
< X_REGISTER_SIZE
1467 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1468 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1472 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1473 gdbarch_register_name (gdbarch
, regnum
),
1474 phex (regval
, X_REGISTER_SIZE
));
1476 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1483 /* Attempt to marshall a value in a V register. Return 1 if
1484 successful, or 0 if insufficient registers are available. This
1485 function, unlike the equivalent pass_in_x() function does not
1486 handle arguments spread across multiple registers. */
1489 pass_in_v (struct gdbarch
*gdbarch
,
1490 struct regcache
*regcache
,
1491 struct aarch64_call_info
*info
,
1492 const bfd_byte
*buf
)
1496 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1497 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1502 regcache_cooked_write (regcache
, regnum
, buf
);
1505 debug_printf ("arg %d in %s\n", info
->argnum
,
1506 gdbarch_register_name (gdbarch
, regnum
));
1514 /* Marshall an argument onto the stack. */
1517 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1518 const bfd_byte
*buf
)
1520 int len
= TYPE_LENGTH (type
);
1526 align
= aarch64_type_align (type
);
1528 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1529 Natural alignment of the argument's type. */
1530 align
= align_up (align
, 8);
1532 /* The AArch64 PCS requires at most doubleword alignment. */
1538 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1544 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1547 if (info
->nsaa
& (align
- 1))
1549 /* Push stack alignment padding. */
1550 int pad
= align
- (info
->nsaa
& (align
- 1));
1555 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1560 /* Marshall an argument into a sequence of one or more consecutive X
1561 registers or, if insufficient X registers are available then onto
1565 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1566 struct aarch64_call_info
*info
, struct type
*type
,
1567 const bfd_byte
*buf
)
1569 int len
= TYPE_LENGTH (type
);
1570 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1572 /* PCS C.13 - Pass in registers if we have enough spare */
1573 if (info
->ngrn
+ nregs
<= 8)
1575 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1576 info
->ngrn
+= nregs
;
1581 pass_on_stack (info
, type
, buf
);
1585 /* Pass a value in a V register, or on the stack if insufficient are
1589 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1590 struct regcache
*regcache
,
1591 struct aarch64_call_info
*info
,
1593 const bfd_byte
*buf
)
1595 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1596 pass_on_stack (info
, type
, buf
);
1599 /* Implement the "push_dummy_call" gdbarch method. */
1602 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1603 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1605 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1606 CORE_ADDR struct_addr
)
1612 struct aarch64_call_info info
;
1613 struct type
*func_type
;
1614 struct type
*return_type
;
1615 int lang_struct_return
;
1617 memset (&info
, 0, sizeof (info
));
1619 /* We need to know what the type of the called function is in order
1620 to determine the number of named/anonymous arguments for the
1621 actual argument placement, and the return type in order to handle
1622 return value correctly.
1624 The generic code above us views the decision of return in memory
1625 or return in registers as a two stage processes. The language
1626 handler is consulted first and may decide to return in memory (eg
1627 class with copy constructor returned by value), this will cause
1628 the generic code to allocate space AND insert an initial leading
1631 If the language code does not decide to pass in memory then the
1632 target code is consulted.
1634 If the language code decides to pass in memory we want to move
1635 the pointer inserted as the initial argument from the argument
1636 list and into X8, the conventional AArch64 struct return pointer
1639 This is slightly awkward, ideally the flag "lang_struct_return"
1640 would be passed to the targets implementation of push_dummy_call.
1641 Rather that change the target interface we call the language code
1642 directly ourselves. */
1644 func_type
= check_typedef (value_type (function
));
1646 /* Dereference function pointer types. */
1647 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1648 func_type
= TYPE_TARGET_TYPE (func_type
);
1650 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1651 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1653 /* If language_pass_by_reference () returned true we will have been
1654 given an additional initial argument, a hidden pointer to the
1655 return slot in memory. */
1656 return_type
= TYPE_TARGET_TYPE (func_type
);
1657 lang_struct_return
= language_pass_by_reference (return_type
);
1659 /* Set the return address. For the AArch64, the return breakpoint
1660 is always at BP_ADDR. */
1661 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1663 /* If we were given an initial argument for the return slot because
1664 lang_struct_return was true, lose it. */
1665 if (lang_struct_return
)
1671 /* The struct_return pointer occupies X8. */
1672 if (struct_return
|| lang_struct_return
)
1676 debug_printf ("struct return in %s = 0x%s\n",
1677 gdbarch_register_name (gdbarch
,
1678 AARCH64_STRUCT_RETURN_REGNUM
),
1679 paddress (gdbarch
, struct_addr
));
1681 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1685 for (argnum
= 0; argnum
< nargs
; argnum
++)
1687 struct value
*arg
= args
[argnum
];
1688 struct type
*arg_type
;
1691 arg_type
= check_typedef (value_type (arg
));
1692 len
= TYPE_LENGTH (arg_type
);
1694 switch (TYPE_CODE (arg_type
))
1697 case TYPE_CODE_BOOL
:
1698 case TYPE_CODE_CHAR
:
1699 case TYPE_CODE_RANGE
:
1700 case TYPE_CODE_ENUM
:
1703 /* Promote to 32 bit integer. */
1704 if (TYPE_UNSIGNED (arg_type
))
1705 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1707 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1708 arg
= value_cast (arg_type
, arg
);
1710 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1711 value_contents (arg
));
1714 case TYPE_CODE_COMPLEX
:
1717 const bfd_byte
*buf
= value_contents (arg
);
1718 struct type
*target_type
=
1719 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1721 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1722 pass_in_v (gdbarch
, regcache
, &info
,
1723 buf
+ TYPE_LENGTH (target_type
));
1728 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1732 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1733 value_contents (arg
));
1736 case TYPE_CODE_STRUCT
:
1737 case TYPE_CODE_ARRAY
:
1738 case TYPE_CODE_UNION
:
1739 if (is_hfa (arg_type
))
1741 int elements
= TYPE_NFIELDS (arg_type
);
1743 /* Homogeneous Aggregates */
1744 if (info
.nsrn
+ elements
< 8)
1748 for (i
= 0; i
< elements
; i
++)
1750 /* We know that we have sufficient registers
1751 available therefore this will never fallback
1753 struct value
*field
=
1754 value_primitive_field (arg
, 0, i
, arg_type
);
1755 struct type
*field_type
=
1756 check_typedef (value_type (field
));
1758 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1759 value_contents_writeable (field
));
1765 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1770 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1771 invisible reference. */
1773 /* Allocate aligned storage. */
1774 sp
= align_down (sp
- len
, 16);
1776 /* Write the real data into the stack. */
1777 write_memory (sp
, value_contents (arg
), len
);
1779 /* Construct the indirection. */
1780 arg_type
= lookup_pointer_type (arg_type
);
1781 arg
= value_from_pointer (arg_type
, sp
);
1782 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1783 value_contents (arg
));
1786 /* PCS C.15 / C.18 multiple values pass. */
1787 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1788 value_contents (arg
));
1792 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1793 value_contents (arg
));
1798 /* Make sure stack retains 16 byte alignment. */
1800 sp
-= 16 - (info
.nsaa
& 15);
1802 while (!VEC_empty (stack_item_t
, info
.si
))
1804 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1807 write_memory (sp
, si
->data
, si
->len
);
1808 VEC_pop (stack_item_t
, info
.si
);
1811 VEC_free (stack_item_t
, info
.si
);
1813 /* Finally, update the SP register. */
1814 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1819 /* Implement the "frame_align" gdbarch method. */
1822 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1824 /* Align the stack to sixteen bytes. */
1825 return sp
& ~(CORE_ADDR
) 15;
1828 /* Return the type for an AdvSISD Q register. */
1830 static struct type
*
1831 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1833 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1835 if (tdep
->vnq_type
== NULL
)
1840 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1843 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1844 append_composite_type_field (t
, "u", elem
);
1846 elem
= builtin_type (gdbarch
)->builtin_int128
;
1847 append_composite_type_field (t
, "s", elem
);
1852 return tdep
->vnq_type
;
1855 /* Return the type for an AdvSISD D register. */
1857 static struct type
*
1858 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1860 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1862 if (tdep
->vnd_type
== NULL
)
1867 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1870 elem
= builtin_type (gdbarch
)->builtin_double
;
1871 append_composite_type_field (t
, "f", elem
);
1873 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1874 append_composite_type_field (t
, "u", elem
);
1876 elem
= builtin_type (gdbarch
)->builtin_int64
;
1877 append_composite_type_field (t
, "s", elem
);
1882 return tdep
->vnd_type
;
1885 /* Return the type for an AdvSISD S register. */
1887 static struct type
*
1888 aarch64_vns_type (struct gdbarch
*gdbarch
)
1890 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1892 if (tdep
->vns_type
== NULL
)
1897 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1900 elem
= builtin_type (gdbarch
)->builtin_float
;
1901 append_composite_type_field (t
, "f", elem
);
1903 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1904 append_composite_type_field (t
, "u", elem
);
1906 elem
= builtin_type (gdbarch
)->builtin_int32
;
1907 append_composite_type_field (t
, "s", elem
);
1912 return tdep
->vns_type
;
1915 /* Return the type for an AdvSISD H register. */
1917 static struct type
*
1918 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1920 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1922 if (tdep
->vnh_type
== NULL
)
1927 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1930 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1931 append_composite_type_field (t
, "u", elem
);
1933 elem
= builtin_type (gdbarch
)->builtin_int16
;
1934 append_composite_type_field (t
, "s", elem
);
1939 return tdep
->vnh_type
;
1942 /* Return the type for an AdvSISD B register. */
1944 static struct type
*
1945 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1947 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1949 if (tdep
->vnb_type
== NULL
)
1954 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1957 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1958 append_composite_type_field (t
, "u", elem
);
1960 elem
= builtin_type (gdbarch
)->builtin_int8
;
1961 append_composite_type_field (t
, "s", elem
);
1966 return tdep
->vnb_type
;
1969 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1972 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1974 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1975 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1977 if (reg
== AARCH64_DWARF_SP
)
1978 return AARCH64_SP_REGNUM
;
1980 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1981 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1987 /* Implement the "print_insn" gdbarch method. */
1990 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1992 info
->symbols
= NULL
;
1993 return print_insn_aarch64 (memaddr
, info
);
1996 /* AArch64 BRK software debug mode instruction.
1997 Note that AArch64 code is always little-endian.
1998 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1999 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
2001 /* Implement the "breakpoint_from_pc" gdbarch method. */
2003 static const gdb_byte
*
2004 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
2007 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2009 *lenptr
= sizeof (aarch64_default_breakpoint
);
2010 return aarch64_default_breakpoint
;
2013 /* Extract from an array REGS containing the (raw) register state a
2014 function return value of type TYPE, and copy that, in virtual
2015 format, into VALBUF. */
2018 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2021 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2022 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2024 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2026 bfd_byte buf
[V_REGISTER_SIZE
];
2027 int len
= TYPE_LENGTH (type
);
2029 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
2030 memcpy (valbuf
, buf
, len
);
2032 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2033 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2034 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2035 || TYPE_CODE (type
) == TYPE_CODE_PTR
2036 || TYPE_CODE (type
) == TYPE_CODE_REF
2037 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2039 /* If the the type is a plain integer, then the access is
2040 straight-forward. Otherwise we have to play around a bit
2042 int len
= TYPE_LENGTH (type
);
2043 int regno
= AARCH64_X0_REGNUM
;
2048 /* By using store_unsigned_integer we avoid having to do
2049 anything special for small big-endian values. */
2050 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2051 store_unsigned_integer (valbuf
,
2052 (len
> X_REGISTER_SIZE
2053 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2054 len
-= X_REGISTER_SIZE
;
2055 valbuf
+= X_REGISTER_SIZE
;
2058 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
2060 int regno
= AARCH64_V0_REGNUM
;
2061 bfd_byte buf
[V_REGISTER_SIZE
];
2062 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
2063 int len
= TYPE_LENGTH (target_type
);
2065 regcache_cooked_read (regs
, regno
, buf
);
2066 memcpy (valbuf
, buf
, len
);
2068 regcache_cooked_read (regs
, regno
+ 1, buf
);
2069 memcpy (valbuf
, buf
, len
);
2072 else if (is_hfa (type
))
2074 int elements
= TYPE_NFIELDS (type
);
2075 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2076 int len
= TYPE_LENGTH (member_type
);
2079 for (i
= 0; i
< elements
; i
++)
2081 int regno
= AARCH64_V0_REGNUM
+ i
;
2082 bfd_byte buf
[X_REGISTER_SIZE
];
2086 debug_printf ("read HFA return value element %d from %s\n",
2088 gdbarch_register_name (gdbarch
, regno
));
2090 regcache_cooked_read (regs
, regno
, buf
);
2092 memcpy (valbuf
, buf
, len
);
2098 /* For a structure or union the behaviour is as if the value had
2099 been stored to word-aligned memory and then loaded into
2100 registers with 64-bit load instruction(s). */
2101 int len
= TYPE_LENGTH (type
);
2102 int regno
= AARCH64_X0_REGNUM
;
2103 bfd_byte buf
[X_REGISTER_SIZE
];
2107 regcache_cooked_read (regs
, regno
++, buf
);
2108 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2109 len
-= X_REGISTER_SIZE
;
2110 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  return 0;
}
2150 /* Write into appropriate registers a function return value of type
2151 TYPE, given in virtual format. */
2154 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2155 const gdb_byte
*valbuf
)
2157 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2158 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2160 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2162 bfd_byte buf
[V_REGISTER_SIZE
];
2163 int len
= TYPE_LENGTH (type
);
2165 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2166 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2168 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2169 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2170 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2171 || TYPE_CODE (type
) == TYPE_CODE_PTR
2172 || TYPE_CODE (type
) == TYPE_CODE_REF
2173 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2175 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2177 /* Values of one word or less are zero/sign-extended and
2179 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2180 LONGEST val
= unpack_long (type
, valbuf
);
2182 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2183 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2187 /* Integral values greater than one word are stored in
2188 consecutive registers starting with r0. This will always
2189 be a multiple of the regiser size. */
2190 int len
= TYPE_LENGTH (type
);
2191 int regno
= AARCH64_X0_REGNUM
;
2195 regcache_cooked_write (regs
, regno
++, valbuf
);
2196 len
-= X_REGISTER_SIZE
;
2197 valbuf
+= X_REGISTER_SIZE
;
2201 else if (is_hfa (type
))
2203 int elements
= TYPE_NFIELDS (type
);
2204 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2205 int len
= TYPE_LENGTH (member_type
);
2208 for (i
= 0; i
< elements
; i
++)
2210 int regno
= AARCH64_V0_REGNUM
+ i
;
2211 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2215 debug_printf ("write HFA return value element %d to %s\n",
2217 gdbarch_register_name (gdbarch
, regno
));
2220 memcpy (tmpbuf
, valbuf
, len
);
2221 regcache_cooked_write (regs
, regno
, tmpbuf
);
2227 /* For a structure or union the behaviour is as if the value had
2228 been stored to word-aligned memory and then loaded into
2229 registers with 64-bit load instruction(s). */
2230 int len
= TYPE_LENGTH (type
);
2231 int regno
= AARCH64_X0_REGNUM
;
2232 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2236 memcpy (tmpbuf
, valbuf
,
2237 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2238 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2239 len
-= X_REGISTER_SIZE
;
2240 valbuf
+= X_REGISTER_SIZE
;
2245 /* Implement the "return_value" gdbarch method. */
2247 static enum return_value_convention
2248 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2249 struct type
*valtype
, struct regcache
*regcache
,
2250 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2252 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2254 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2255 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2256 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2258 if (aarch64_return_in_memory (gdbarch
, valtype
))
2261 debug_printf ("return value in memory\n");
2262 return RETURN_VALUE_STRUCT_CONVENTION
;
2267 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2270 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2273 debug_printf ("return value in registers\n");
2275 return RETURN_VALUE_REGISTER_CONVENTION
;
2278 /* Implement the "get_longjmp_target" gdbarch method. */
2281 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2284 gdb_byte buf
[X_REGISTER_SIZE
];
2285 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2286 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2287 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2289 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2291 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2295 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2299 /* Implement the "gen_return_address" gdbarch method. */
2302 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2303 struct agent_expr
*ax
, struct axs_value
*value
,
2306 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2307 value
->kind
= axs_lvalue_register
;
2308 value
->u
.reg
= AARCH64_LR_REGNUM
;
2312 /* Return the pseudo register name corresponding to register regnum. */
2315 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2317 static const char *const q_name
[] =
2319 "q0", "q1", "q2", "q3",
2320 "q4", "q5", "q6", "q7",
2321 "q8", "q9", "q10", "q11",
2322 "q12", "q13", "q14", "q15",
2323 "q16", "q17", "q18", "q19",
2324 "q20", "q21", "q22", "q23",
2325 "q24", "q25", "q26", "q27",
2326 "q28", "q29", "q30", "q31",
2329 static const char *const d_name
[] =
2331 "d0", "d1", "d2", "d3",
2332 "d4", "d5", "d6", "d7",
2333 "d8", "d9", "d10", "d11",
2334 "d12", "d13", "d14", "d15",
2335 "d16", "d17", "d18", "d19",
2336 "d20", "d21", "d22", "d23",
2337 "d24", "d25", "d26", "d27",
2338 "d28", "d29", "d30", "d31",
2341 static const char *const s_name
[] =
2343 "s0", "s1", "s2", "s3",
2344 "s4", "s5", "s6", "s7",
2345 "s8", "s9", "s10", "s11",
2346 "s12", "s13", "s14", "s15",
2347 "s16", "s17", "s18", "s19",
2348 "s20", "s21", "s22", "s23",
2349 "s24", "s25", "s26", "s27",
2350 "s28", "s29", "s30", "s31",
2353 static const char *const h_name
[] =
2355 "h0", "h1", "h2", "h3",
2356 "h4", "h5", "h6", "h7",
2357 "h8", "h9", "h10", "h11",
2358 "h12", "h13", "h14", "h15",
2359 "h16", "h17", "h18", "h19",
2360 "h20", "h21", "h22", "h23",
2361 "h24", "h25", "h26", "h27",
2362 "h28", "h29", "h30", "h31",
2365 static const char *const b_name
[] =
2367 "b0", "b1", "b2", "b3",
2368 "b4", "b5", "b6", "b7",
2369 "b8", "b9", "b10", "b11",
2370 "b12", "b13", "b14", "b15",
2371 "b16", "b17", "b18", "b19",
2372 "b20", "b21", "b22", "b23",
2373 "b24", "b25", "b26", "b27",
2374 "b28", "b29", "b30", "b31",
2377 regnum
-= gdbarch_num_regs (gdbarch
);
2379 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2380 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2382 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2383 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2385 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2386 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2388 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2389 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2391 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2392 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2394 internal_error (__FILE__
, __LINE__
,
2395 _("aarch64_pseudo_register_name: bad register number %d"),
2399 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2401 static struct type
*
2402 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2404 regnum
-= gdbarch_num_regs (gdbarch
);
2406 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2407 return aarch64_vnq_type (gdbarch
);
2409 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2410 return aarch64_vnd_type (gdbarch
);
2412 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2413 return aarch64_vns_type (gdbarch
);
2415 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2416 return aarch64_vnh_type (gdbarch
);
2418 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2419 return aarch64_vnb_type (gdbarch
);
2421 internal_error (__FILE__
, __LINE__
,
2422 _("aarch64_pseudo_register_type: bad register number %d"),
2426 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2429 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2430 struct reggroup
*group
)
2432 regnum
-= gdbarch_num_regs (gdbarch
);
2434 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2435 return group
== all_reggroup
|| group
== vector_reggroup
;
2436 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2437 return (group
== all_reggroup
|| group
== vector_reggroup
2438 || group
== float_reggroup
);
2439 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2440 return (group
== all_reggroup
|| group
== vector_reggroup
2441 || group
== float_reggroup
);
2442 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2443 return group
== all_reggroup
|| group
== vector_reggroup
;
2444 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2445 return group
== all_reggroup
|| group
== vector_reggroup
;
2447 return group
== all_reggroup
;
2450 /* Implement the "pseudo_register_read_value" gdbarch method. */
2452 static struct value
*
2453 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2454 struct regcache
*regcache
,
2457 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2458 struct value
*result_value
;
2461 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2462 VALUE_LVAL (result_value
) = lval_register
;
2463 VALUE_REGNUM (result_value
) = regnum
;
2464 buf
= value_contents_raw (result_value
);
2466 regnum
-= gdbarch_num_regs (gdbarch
);
2468 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2470 enum register_status status
;
2473 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2474 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2475 if (status
!= REG_VALID
)
2476 mark_value_bytes_unavailable (result_value
, 0,
2477 TYPE_LENGTH (value_type (result_value
)));
2479 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2480 return result_value
;
2483 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2485 enum register_status status
;
2488 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2489 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2490 if (status
!= REG_VALID
)
2491 mark_value_bytes_unavailable (result_value
, 0,
2492 TYPE_LENGTH (value_type (result_value
)));
2494 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2495 return result_value
;
2498 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2500 enum register_status status
;
2503 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2504 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2505 if (status
!= REG_VALID
)
2506 mark_value_bytes_unavailable (result_value
, 0,
2507 TYPE_LENGTH (value_type (result_value
)));
2509 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2510 return result_value
;
2513 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2515 enum register_status status
;
2518 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2519 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2520 if (status
!= REG_VALID
)
2521 mark_value_bytes_unavailable (result_value
, 0,
2522 TYPE_LENGTH (value_type (result_value
)));
2524 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2525 return result_value
;
2528 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2530 enum register_status status
;
2533 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2534 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2535 if (status
!= REG_VALID
)
2536 mark_value_bytes_unavailable (result_value
, 0,
2537 TYPE_LENGTH (value_type (result_value
)));
2539 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2540 return result_value
;
2543 gdb_assert_not_reached ("regnum out of bound");
2546 /* Implement the "pseudo_register_write" gdbarch method. */
2549 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2550 int regnum
, const gdb_byte
*buf
)
2552 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2554 /* Ensure the register buffer is zero, we want gdb writes of the
2555 various 'scalar' pseudo registers to behavior like architectural
2556 writes, register width bytes are written the remainder are set to
2558 memset (reg_buf
, 0, sizeof (reg_buf
));
2560 regnum
-= gdbarch_num_regs (gdbarch
);
2562 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2564 /* pseudo Q registers */
2567 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2568 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2569 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2573 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2575 /* pseudo D registers */
2578 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2579 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2580 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2584 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2588 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2589 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2590 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2594 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2596 /* pseudo H registers */
2599 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2600 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2601 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2605 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2607 /* pseudo B registers */
2610 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2611 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2612 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2616 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.

   BATON points at the register number stashed in
   aarch64_register_aliases[]; return that register's value in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2630 /* Implement the "software_single_step" gdbarch method, needed to
2631 single step through atomic sequences on AArch64. */
2634 aarch64_software_single_step (struct frame_info
*frame
)
2636 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2637 struct address_space
*aspace
= get_frame_address_space (frame
);
2638 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2639 const int insn_size
= 4;
2640 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2641 CORE_ADDR pc
= get_frame_pc (frame
);
2642 CORE_ADDR breaks
[2] = { -1, -1 };
2644 CORE_ADDR closing_insn
= 0;
2645 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2646 byte_order_for_code
);
2649 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2650 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2652 /* Look for a Load Exclusive instruction which begins the sequence. */
2653 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2656 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2662 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2663 byte_order_for_code
);
2665 /* Check if the instruction is a conditional branch. */
2666 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2668 if (bc_insn_count
>= 1)
2671 /* It is, so we'll try to set a breakpoint at the destination. */
2672 breaks
[1] = loc
+ offset
;
2678 /* Look for the Store Exclusive which closes the atomic sequence. */
2679 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2686 /* We didn't find a closing Store Exclusive instruction, fall back. */
2690 /* Insert breakpoint after the end of the atomic sequence. */
2691 breaks
[0] = loc
+ insn_size
;
2693 /* Check for duplicated breakpoints, and also check that the second
2694 breakpoint is not within the atomic sequence. */
2696 && (breaks
[1] == breaks
[0]
2697 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2698 last_breakpoint
= 0;
2700 /* Insert the breakpoint at the end of the sequence, and one at the
2701 destination of the conditional branch, if it exists. */
2702 for (index
= 0; index
<= last_breakpoint
; index
++)
2703 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2708 /* Initialize the current architecture based on INFO. If possible,
2709 re-use an architecture from ARCHES, which is a list of
2710 architectures already created during this debugging session.
2712 Called e.g. at program startup, when reading a core file, and when
2713 reading a binary file. */
2715 static struct gdbarch
*
2716 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2718 struct gdbarch_tdep
*tdep
;
2719 struct gdbarch
*gdbarch
;
2720 struct gdbarch_list
*best_arch
;
2721 struct tdesc_arch_data
*tdesc_data
= NULL
;
2722 const struct target_desc
*tdesc
= info
.target_desc
;
2724 int have_fpa_registers
= 1;
2726 const struct tdesc_feature
*feature
;
2728 int num_pseudo_regs
= 0;
2730 /* Ensure we always have a target descriptor. */
2731 if (!tdesc_has_registers (tdesc
))
2732 tdesc
= tdesc_aarch64
;
2736 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2738 if (feature
== NULL
)
2741 tdesc_data
= tdesc_data_alloc ();
2743 /* Validate the descriptor provides the mandatory core R registers
2744 and allocate their numbers. */
2745 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2747 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2748 aarch64_r_register_names
[i
]);
2750 num_regs
= AARCH64_X0_REGNUM
+ i
;
2752 /* Look for the V registers. */
2753 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2756 /* Validate the descriptor provides the mandatory V registers
2757 and allocate their numbers. */
2758 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2760 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2761 aarch64_v_register_names
[i
]);
2763 num_regs
= AARCH64_V0_REGNUM
+ i
;
2765 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2766 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2767 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2768 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2769 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2774 tdesc_data_cleanup (tdesc_data
);
2778 /* AArch64 code is always little-endian. */
2779 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2781 /* If there is already a candidate, use it. */
2782 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2784 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2786 /* Found a match. */
2790 if (best_arch
!= NULL
)
2792 if (tdesc_data
!= NULL
)
2793 tdesc_data_cleanup (tdesc_data
);
2794 return best_arch
->gdbarch
;
2797 tdep
= XCNEW (struct gdbarch_tdep
);
2798 gdbarch
= gdbarch_alloc (&info
, tdep
);
2800 /* This should be low enough for everything. */
2801 tdep
->lowest_pc
= 0x20;
2802 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2803 tdep
->jb_elt_size
= 8;
2805 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2806 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2808 /* Frame handling. */
2809 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2810 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2811 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2813 /* Advance PC across function entry code. */
2814 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2816 /* The stack grows downward. */
2817 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2819 /* Breakpoint manipulation. */
2820 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2821 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2822 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2824 /* Information about registers, etc. */
2825 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2826 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2827 set_gdbarch_num_regs (gdbarch
, num_regs
);
2829 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2830 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2831 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2832 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2833 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2834 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2835 aarch64_pseudo_register_reggroup_p
);
2838 set_gdbarch_short_bit (gdbarch
, 16);
2839 set_gdbarch_int_bit (gdbarch
, 32);
2840 set_gdbarch_float_bit (gdbarch
, 32);
2841 set_gdbarch_double_bit (gdbarch
, 64);
2842 set_gdbarch_long_double_bit (gdbarch
, 128);
2843 set_gdbarch_long_bit (gdbarch
, 64);
2844 set_gdbarch_long_long_bit (gdbarch
, 64);
2845 set_gdbarch_ptr_bit (gdbarch
, 64);
2846 set_gdbarch_char_signed (gdbarch
, 0);
2847 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2848 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2849 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2851 /* Internal <-> external register number maps. */
2852 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2854 /* Returning results. */
2855 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2858 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2860 /* Virtual tables. */
2861 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2863 /* Hook in the ABI-specific overrides, if they have been registered. */
2864 info
.target_desc
= tdesc
;
2865 info
.tdep_info
= (void *) tdesc_data
;
2866 gdbarch_init_osabi (info
, gdbarch
);
2868 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2870 /* Add some default predicates. */
2871 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2872 dwarf2_append_unwinders (gdbarch
);
2873 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2875 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2877 /* Now we have tuned the configuration, set a few final things,
2878 based on what the OS ABI has told us. */
2880 if (tdep
->jb_pc
>= 0)
2881 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2883 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
2885 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2887 /* Add standard register aliases. */
2888 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2889 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2890 value_of_aarch64_user_reg
,
2891 &aarch64_register_aliases
[i
].regnum
);
2897 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2899 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2904 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2905 paddress (gdbarch
, tdep
->lowest_pc
));
2908 /* Suppress warning from -Wmissing-prototypes. */
2909 extern initialize_file_ftype _initialize_aarch64_tdep
;
2912 _initialize_aarch64_tdep (void)
2914 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2917 initialize_tdesc_aarch64 ();
2919 /* Debug this file's internals. */
2920 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2921 Set AArch64 debugging."), _("\
2922 Show AArch64 debugging."), _("\
2923 When on, AArch64 specific debugging is enabled."),
2926 &setdebuglist
, &showdebuglist
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Mask of the (X + 1) low-order bits.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST..FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Allocate and fill the array of register numbers recorded for an
   instruction.  No-op when LENGTH is zero.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)

/* Allocate and fill the array of memory records (length/address
   pairs) recorded for an instruction.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy(&MEMS->len, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)
2960 /* AArch64 record/replay structures and enumerations. */
2962 struct aarch64_mem_r
2964 uint64_t len
; /* Record length. */
2965 uint64_t addr
; /* Memory address. */
2968 enum aarch64_record_result
2970 AARCH64_RECORD_SUCCESS
,
2971 AARCH64_RECORD_FAILURE
,
2972 AARCH64_RECORD_UNSUPPORTED
,
2973 AARCH64_RECORD_UNKNOWN
2976 typedef struct insn_decode_record_t
2978 struct gdbarch
*gdbarch
;
2979 struct regcache
*regcache
;
2980 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2981 uint32_t aarch64_insn
; /* Insn to be recorded. */
2982 uint32_t mem_rec_count
; /* Count of memory records. */
2983 uint32_t reg_rec_count
; /* Count of register records. */
2984 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2985 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2986 } insn_decode_record
;
2988 /* Record handler for data processing - register instructions. */
2991 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2993 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2994 uint32_t record_buf
[4];
2996 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2997 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2998 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3000 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3004 /* Logical (shifted register). */
3005 if (insn_bits24_27
== 0x0a)
3006 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3008 else if (insn_bits24_27
== 0x0b)
3009 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3011 return AARCH64_RECORD_UNKNOWN
;
3013 record_buf
[0] = reg_rd
;
3014 aarch64_insn_r
->reg_rec_count
= 1;
3016 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3020 if (insn_bits24_27
== 0x0b)
3022 /* Data-processing (3 source). */
3023 record_buf
[0] = reg_rd
;
3024 aarch64_insn_r
->reg_rec_count
= 1;
3026 else if (insn_bits24_27
== 0x0a)
3028 if (insn_bits21_23
== 0x00)
3030 /* Add/subtract (with carry). */
3031 record_buf
[0] = reg_rd
;
3032 aarch64_insn_r
->reg_rec_count
= 1;
3033 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3035 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3036 aarch64_insn_r
->reg_rec_count
= 2;
3039 else if (insn_bits21_23
== 0x02)
3041 /* Conditional compare (register) and conditional compare
3042 (immediate) instructions. */
3043 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3044 aarch64_insn_r
->reg_rec_count
= 1;
3046 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3048 /* CConditional select. */
3049 /* Data-processing (2 source). */
3050 /* Data-processing (1 source). */
3051 record_buf
[0] = reg_rd
;
3052 aarch64_insn_r
->reg_rec_count
= 1;
3055 return AARCH64_RECORD_UNKNOWN
;
3059 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3061 return AARCH64_RECORD_SUCCESS
;
3064 /* Record handler for data processing - immediate instructions. */
3067 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3069 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
3070 uint32_t record_buf
[4];
3072 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3073 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3074 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3075 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3077 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3078 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3079 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3081 record_buf
[0] = reg_rd
;
3082 aarch64_insn_r
->reg_rec_count
= 1;
3084 else if (insn_bits24_27
== 0x01)
3086 /* Add/Subtract (immediate). */
3087 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3088 record_buf
[0] = reg_rd
;
3089 aarch64_insn_r
->reg_rec_count
= 1;
3091 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3093 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3095 /* Logical (immediate). */
3096 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3097 record_buf
[0] = reg_rd
;
3098 aarch64_insn_r
->reg_rec_count
= 1;
3100 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3103 return AARCH64_RECORD_UNKNOWN
;
3105 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3107 return AARCH64_RECORD_SUCCESS
;
3110 /* Record handler for branch, exception generation and system instructions. */
3113 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3115 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3116 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3117 uint32_t record_buf
[4];
3119 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3120 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3121 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3123 if (insn_bits28_31
== 0x0d)
3125 /* Exception generation instructions. */
3126 if (insn_bits24_27
== 0x04)
3128 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3129 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3130 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3132 ULONGEST svc_number
;
3134 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3136 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3140 return AARCH64_RECORD_UNSUPPORTED
;
3142 /* System instructions. */
3143 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3145 uint32_t reg_rt
, reg_crn
;
3147 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3148 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3150 /* Record rt in case of sysl and mrs instructions. */
3151 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3153 record_buf
[0] = reg_rt
;
3154 aarch64_insn_r
->reg_rec_count
= 1;
3156 /* Record cpsr for hint and msr(immediate) instructions. */
3157 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3159 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3160 aarch64_insn_r
->reg_rec_count
= 1;
3163 /* Unconditional branch (register). */
3164 else if((insn_bits24_27
& 0x0e) == 0x06)
3166 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3167 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3168 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3171 return AARCH64_RECORD_UNKNOWN
;
3173 /* Unconditional branch (immediate). */
3174 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3176 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3177 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3178 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3181 /* Compare & branch (immediate), Test & branch (immediate) and
3182 Conditional branch (immediate). */
3183 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3185 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3187 return AARCH64_RECORD_SUCCESS
;
3190 /* Record handler for advanced SIMD load and store instructions. */
3193 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3196 uint64_t addr_offset
= 0;
3197 uint32_t record_buf
[24];
3198 uint64_t record_buf_mem
[24];
3199 uint32_t reg_rn
, reg_rt
;
3200 uint32_t reg_index
= 0, mem_index
= 0;
3201 uint8_t opcode_bits
, size_bits
;
3203 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3204 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3205 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3206 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3207 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3210 debug_printf ("Process record: Advanced SIMD load/store\n");
3212 /* Load/store single structure. */
3213 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3215 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3216 scale
= opcode_bits
>> 2;
3217 selem
= ((opcode_bits
& 0x02) |
3218 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3222 if (size_bits
& 0x01)
3223 return AARCH64_RECORD_UNKNOWN
;
3226 if ((size_bits
>> 1) & 0x01)
3227 return AARCH64_RECORD_UNKNOWN
;
3228 if (size_bits
& 0x01)
3230 if (!((opcode_bits
>> 1) & 0x01))
3233 return AARCH64_RECORD_UNKNOWN
;
3237 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3244 return AARCH64_RECORD_UNKNOWN
;
3250 for (sindex
= 0; sindex
< selem
; sindex
++)
3252 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3253 reg_rt
= (reg_rt
+ 1) % 32;
3257 for (sindex
= 0; sindex
< selem
; sindex
++)
3258 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3259 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3262 record_buf_mem
[mem_index
++] = esize
/ 8;
3263 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3265 addr_offset
= addr_offset
+ (esize
/ 8);
3266 reg_rt
= (reg_rt
+ 1) % 32;
3269 /* Load/store multiple structure. */
3272 uint8_t selem
, esize
, rpt
, elements
;
3273 uint8_t eindex
, rindex
;
3275 esize
= 8 << size_bits
;
3276 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3277 elements
= 128 / esize
;
3279 elements
= 64 / esize
;
3281 switch (opcode_bits
)
3283 /*LD/ST4 (4 Registers). */
3288 /*LD/ST1 (4 Registers). */
3293 /*LD/ST3 (3 Registers). */
3298 /*LD/ST1 (3 Registers). */
3303 /*LD/ST1 (1 Register). */
3308 /*LD/ST2 (2 Registers). */
3313 /*LD/ST1 (2 Registers). */
3319 return AARCH64_RECORD_UNSUPPORTED
;
3322 for (rindex
= 0; rindex
< rpt
; rindex
++)
3323 for (eindex
= 0; eindex
< elements
; eindex
++)
3325 uint8_t reg_tt
, sindex
;
3326 reg_tt
= (reg_rt
+ rindex
) % 32;
3327 for (sindex
= 0; sindex
< selem
; sindex
++)
3329 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3330 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3333 record_buf_mem
[mem_index
++] = esize
/ 8;
3334 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3336 addr_offset
= addr_offset
+ (esize
/ 8);
3337 reg_tt
= (reg_tt
+ 1) % 32;
3342 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3343 record_buf
[reg_index
++] = reg_rn
;
3345 aarch64_insn_r
->reg_rec_count
= reg_index
;
3346 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3347 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3349 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3351 return AARCH64_RECORD_SUCCESS
;
3354 /* Record handler for load and store instructions. */
3357 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3359 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3360 uint8_t insn_bit23
, insn_bit21
;
3361 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3362 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3363 uint64_t datasize
, offset
;
3364 uint32_t record_buf
[8];
3365 uint64_t record_buf_mem
[8];
3368 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3369 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3370 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3371 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3372 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3373 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3374 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3375 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3376 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3377 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3378 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3380 /* Load/store exclusive. */
3381 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3384 debug_printf ("Process record: load/store exclusive\n");
3388 record_buf
[0] = reg_rt
;
3389 aarch64_insn_r
->reg_rec_count
= 1;
3392 record_buf
[1] = reg_rt2
;
3393 aarch64_insn_r
->reg_rec_count
= 2;
3399 datasize
= (8 << size_bits
) * 2;
3401 datasize
= (8 << size_bits
);
3402 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3404 record_buf_mem
[0] = datasize
/ 8;
3405 record_buf_mem
[1] = address
;
3406 aarch64_insn_r
->mem_rec_count
= 1;
3409 /* Save register rs. */
3410 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3411 aarch64_insn_r
->reg_rec_count
= 1;
3415 /* Load register (literal) instructions decoding. */
3416 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3419 debug_printf ("Process record: load register (literal)\n");
3421 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3423 record_buf
[0] = reg_rt
;
3424 aarch64_insn_r
->reg_rec_count
= 1;
3426 /* All types of load/store pair instructions decoding. */
3427 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3430 debug_printf ("Process record: load/store pair\n");
3436 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3437 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3441 record_buf
[0] = reg_rt
;
3442 record_buf
[1] = reg_rt2
;
3444 aarch64_insn_r
->reg_rec_count
= 2;
3449 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3451 size_bits
= size_bits
>> 1;
3452 datasize
= 8 << (2 + size_bits
);
3453 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3454 offset
= offset
<< (2 + size_bits
);
3455 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3457 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3459 if (imm7_off
& 0x40)
3460 address
= address
- offset
;
3462 address
= address
+ offset
;
3465 record_buf_mem
[0] = datasize
/ 8;
3466 record_buf_mem
[1] = address
;
3467 record_buf_mem
[2] = datasize
/ 8;
3468 record_buf_mem
[3] = address
+ (datasize
/ 8);
3469 aarch64_insn_r
->mem_rec_count
= 2;
3471 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3472 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3474 /* Load/store register (unsigned immediate) instructions. */
3475 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3477 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3484 if (size_bits
!= 0x03)
3487 return AARCH64_RECORD_UNKNOWN
;
3491 debug_printf ("Process record: load/store (unsigned immediate):"
3492 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3498 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3499 datasize
= 8 << size_bits
;
3500 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3502 offset
= offset
<< size_bits
;
3503 address
= address
+ offset
;
3505 record_buf_mem
[0] = datasize
>> 3;
3506 record_buf_mem
[1] = address
;
3507 aarch64_insn_r
->mem_rec_count
= 1;
3512 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3514 record_buf
[0] = reg_rt
;
3515 aarch64_insn_r
->reg_rec_count
= 1;
3518 /* Load/store register (register offset) instructions. */
3519 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3520 && insn_bits10_11
== 0x02 && insn_bit21
)
3523 debug_printf ("Process record: load/store (register offset)\n");
3524 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3531 if (size_bits
!= 0x03)
3534 return AARCH64_RECORD_UNKNOWN
;
3538 uint64_t reg_rm_val
;
3539 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3540 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3541 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3542 offset
= reg_rm_val
<< size_bits
;
3544 offset
= reg_rm_val
;
3545 datasize
= 8 << size_bits
;
3546 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3548 address
= address
+ offset
;
3549 record_buf_mem
[0] = datasize
>> 3;
3550 record_buf_mem
[1] = address
;
3551 aarch64_insn_r
->mem_rec_count
= 1;
3556 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3558 record_buf
[0] = reg_rt
;
3559 aarch64_insn_r
->reg_rec_count
= 1;
3562 /* Load/store register (immediate and unprivileged) instructions. */
3563 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3568 debug_printf ("Process record: load/store "
3569 "(immediate and unprivileged)\n");
3571 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3578 if (size_bits
!= 0x03)
3581 return AARCH64_RECORD_UNKNOWN
;
3586 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3587 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3588 datasize
= 8 << size_bits
;
3589 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3591 if (insn_bits10_11
!= 0x01)
3593 if (imm9_off
& 0x0100)
3594 address
= address
- offset
;
3596 address
= address
+ offset
;
3598 record_buf_mem
[0] = datasize
>> 3;
3599 record_buf_mem
[1] = address
;
3600 aarch64_insn_r
->mem_rec_count
= 1;
3605 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3607 record_buf
[0] = reg_rt
;
3608 aarch64_insn_r
->reg_rec_count
= 1;
3610 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3611 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3613 /* Advanced SIMD load/store instructions. */
3615 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3617 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3619 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3621 return AARCH64_RECORD_SUCCESS
;
3624 /* Record handler for data processing SIMD and floating point instructions. */
3627 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3629 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3630 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3631 uint8_t insn_bits11_14
;
3632 uint32_t record_buf
[2];
3634 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3635 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3636 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3637 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3638 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3639 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3640 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3641 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3642 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3645 debug_printf ("Process record: data processing SIMD/FP: ");
3647 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3649 /* Floating point - fixed point conversion instructions. */
3653 debug_printf ("FP - fixed point conversion");
3655 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3656 record_buf
[0] = reg_rd
;
3658 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3660 /* Floating point - conditional compare instructions. */
3661 else if (insn_bits10_11
== 0x01)
3664 debug_printf ("FP - conditional compare");
3666 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3668 /* Floating point - data processing (2-source) and
3669 conditional select instructions. */
3670 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3673 debug_printf ("FP - DP (2-source)");
3675 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3677 else if (insn_bits10_11
== 0x00)
3679 /* Floating point - immediate instructions. */
3680 if ((insn_bits12_15
& 0x01) == 0x01
3681 || (insn_bits12_15
& 0x07) == 0x04)
3684 debug_printf ("FP - immediate");
3685 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3687 /* Floating point - compare instructions. */
3688 else if ((insn_bits12_15
& 0x03) == 0x02)
3691 debug_printf ("FP - immediate");
3692 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3694 /* Floating point - integer conversions instructions. */
3695 else if (insn_bits12_15
== 0x00)
3697 /* Convert float to integer instruction. */
3698 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3701 debug_printf ("float to int conversion");
3703 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3705 /* Convert integer to float instruction. */
3706 else if ((opcode
>> 1) == 0x01 && !rmode
)
3709 debug_printf ("int to float conversion");
3711 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3713 /* Move float to integer instruction. */
3714 else if ((opcode
>> 1) == 0x03)
3717 debug_printf ("move float to int");
3719 if (!(opcode
& 0x01))
3720 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3722 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3725 return AARCH64_RECORD_UNKNOWN
;
3728 return AARCH64_RECORD_UNKNOWN
;
3731 return AARCH64_RECORD_UNKNOWN
;
3733 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3736 debug_printf ("SIMD copy");
3738 /* Advanced SIMD copy instructions. */
3739 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3740 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3741 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3743 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3744 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3746 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3749 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3751 /* All remaining floating point or advanced SIMD instructions. */
3755 debug_printf ("all remain");
3757 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3761 debug_printf ("\n");
3763 aarch64_insn_r
->reg_rec_count
++;
3764 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3765 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3767 return AARCH64_RECORD_SUCCESS
;
3770 /* Decodes insns type and invokes its record handler. */
3773 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3775 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3777 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3778 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3779 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3780 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3782 /* Data processing - immediate instructions. */
3783 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3784 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3786 /* Branch, exception generation and system instructions. */
3787 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3788 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3790 /* Load and store instructions. */
3791 if (!ins_bit25
&& ins_bit27
)
3792 return aarch64_record_load_store (aarch64_insn_r
);
3794 /* Data processing - register instructions. */
3795 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3796 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3798 /* Data processing - SIMD and floating point instructions. */
3799 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3800 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3802 return AARCH64_RECORD_UNSUPPORTED
;
3805 /* Cleans up local record registers and memory allocations. */
3808 deallocate_reg_mem (insn_decode_record
*record
)
3810 xfree (record
->aarch64_regs
);
3811 xfree (record
->aarch64_mems
);
3814 /* Parse the current instruction and record the values of the registers and
3815 memory that will be changed in current instruction to record_arch_list
3816 return -1 if something is wrong. */
3819 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3820 CORE_ADDR insn_addr
)
3822 uint32_t rec_no
= 0;
3823 uint8_t insn_size
= 4;
3825 ULONGEST t_bit
= 0, insn_id
= 0;
3826 gdb_byte buf
[insn_size
];
3827 insn_decode_record aarch64_record
;
3829 memset (&buf
[0], 0, insn_size
);
3830 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3831 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3832 aarch64_record
.aarch64_insn
3833 = (uint32_t) extract_unsigned_integer (&buf
[0],
3835 gdbarch_byte_order (gdbarch
));
3836 aarch64_record
.regcache
= regcache
;
3837 aarch64_record
.this_addr
= insn_addr
;
3838 aarch64_record
.gdbarch
= gdbarch
;
3840 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3841 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3843 printf_unfiltered (_("Process record does not support instruction "
3844 "0x%0x at address %s.\n"),
3845 aarch64_record
.aarch64_insn
,
3846 paddress (gdbarch
, insn_addr
));
3852 /* Record registers. */
3853 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3855 /* Always record register CPSR. */
3856 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3857 AARCH64_CPSR_REGNUM
);
3858 if (aarch64_record
.aarch64_regs
)
3859 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3860 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3861 aarch64_record
.aarch64_regs
[rec_no
]))
3864 /* Record memories. */
3865 if (aarch64_record
.aarch64_mems
)
3866 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3867 if (record_full_arch_list_add_mem
3868 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3869 aarch64_record
.aarch64_mems
[rec_no
].len
))
3872 if (record_full_arch_list_add_end ())
3876 deallocate_reg_mem (&aarch64_record
);