1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 /* Pseudo register base numbers. */
61 #define AARCH64_Q0_REGNUM 0
62 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
63 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
64 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
65 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
67 /* The standard register names, and all the valid aliases for them. */
70 const char *const name
;
72 } aarch64_register_aliases
[] =
74 /* 64-bit register names. */
75 {"fp", AARCH64_FP_REGNUM
},
76 {"lr", AARCH64_LR_REGNUM
},
77 {"sp", AARCH64_SP_REGNUM
},
79 /* 32-bit register names. */
80 {"w0", AARCH64_X0_REGNUM
+ 0},
81 {"w1", AARCH64_X0_REGNUM
+ 1},
82 {"w2", AARCH64_X0_REGNUM
+ 2},
83 {"w3", AARCH64_X0_REGNUM
+ 3},
84 {"w4", AARCH64_X0_REGNUM
+ 4},
85 {"w5", AARCH64_X0_REGNUM
+ 5},
86 {"w6", AARCH64_X0_REGNUM
+ 6},
87 {"w7", AARCH64_X0_REGNUM
+ 7},
88 {"w8", AARCH64_X0_REGNUM
+ 8},
89 {"w9", AARCH64_X0_REGNUM
+ 9},
90 {"w10", AARCH64_X0_REGNUM
+ 10},
91 {"w11", AARCH64_X0_REGNUM
+ 11},
92 {"w12", AARCH64_X0_REGNUM
+ 12},
93 {"w13", AARCH64_X0_REGNUM
+ 13},
94 {"w14", AARCH64_X0_REGNUM
+ 14},
95 {"w15", AARCH64_X0_REGNUM
+ 15},
96 {"w16", AARCH64_X0_REGNUM
+ 16},
97 {"w17", AARCH64_X0_REGNUM
+ 17},
98 {"w18", AARCH64_X0_REGNUM
+ 18},
99 {"w19", AARCH64_X0_REGNUM
+ 19},
100 {"w20", AARCH64_X0_REGNUM
+ 20},
101 {"w21", AARCH64_X0_REGNUM
+ 21},
102 {"w22", AARCH64_X0_REGNUM
+ 22},
103 {"w23", AARCH64_X0_REGNUM
+ 23},
104 {"w24", AARCH64_X0_REGNUM
+ 24},
105 {"w25", AARCH64_X0_REGNUM
+ 25},
106 {"w26", AARCH64_X0_REGNUM
+ 26},
107 {"w27", AARCH64_X0_REGNUM
+ 27},
108 {"w28", AARCH64_X0_REGNUM
+ 28},
109 {"w29", AARCH64_X0_REGNUM
+ 29},
110 {"w30", AARCH64_X0_REGNUM
+ 30},
113 {"ip0", AARCH64_X0_REGNUM
+ 16},
114 {"ip1", AARCH64_X0_REGNUM
+ 17}
117 /* The required core 'R' registers. */
118 static const char *const aarch64_r_register_names
[] =
120 /* These registers must appear in consecutive RAW register number
121 order and they must begin with AARCH64_X0_REGNUM! */
122 "x0", "x1", "x2", "x3",
123 "x4", "x5", "x6", "x7",
124 "x8", "x9", "x10", "x11",
125 "x12", "x13", "x14", "x15",
126 "x16", "x17", "x18", "x19",
127 "x20", "x21", "x22", "x23",
128 "x24", "x25", "x26", "x27",
129 "x28", "x29", "x30", "sp",
133 /* The FP/SIMD 'V' registers. */
134 static const char *const aarch64_v_register_names
[] =
136 /* These registers must appear in consecutive RAW register number
137 order and they must begin with AARCH64_V0_REGNUM! */
138 "v0", "v1", "v2", "v3",
139 "v4", "v5", "v6", "v7",
140 "v8", "v9", "v10", "v11",
141 "v12", "v13", "v14", "v15",
142 "v16", "v17", "v18", "v19",
143 "v20", "v21", "v22", "v23",
144 "v24", "v25", "v26", "v27",
145 "v28", "v29", "v30", "v31",
150 /* AArch64 prologue cache structure. */
151 struct aarch64_prologue_cache
153 /* The program counter at the start of the function. It is used to
154 identify this frame as a prologue frame. */
157 /* The program counter at the time this frame was created; i.e. where
158 this function was called from. It is used to identify this frame as a
162 /* The stack pointer at the time this frame was created; i.e. the
163 caller's stack pointer when this function was called. It is used
164 to identify this frame. */
167 /* Is the target available to read from? */
170 /* The frame base for this frame is just prev_sp - frame size.
171 FRAMESIZE is the distance from the frame pointer to the
172 initial stack pointer. */
175 /* The register used to hold the frame pointer for this frame. */
178 /* Saved register offsets. */
179 struct trad_frame_saved_reg
*saved_regs
;
182 /* Toggle this file's internal debugging dump. */
183 static int aarch64_debug
;
/* "show debug aarch64" command handler: report whether this file's
   debug tracing is enabled.  VALUE is the rendered setting string.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  /* Do the left shift in the unsigned domain: left-shifting a signed
     value so that bits move into or through the sign bit is undefined
     behavior in C.  The conversion back to int32_t followed by an
     arithmetic right shift performs the sign extension.  */
  return ((int32_t) (insn << shift_l)) >> shift_r;
}
/* Determine if the specified bits within an instruction opcode match
   a specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected = insn & mask;

  return selected == pattern;
}
225 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
227 ADDR specifies the address of the opcode.
228 INSN specifies the opcode to test.
229 RD receives the 'rd' field from the decoded instruction.
230 RN receives the 'rn' field from the decoded instruction.
232 Return 1 if the opcodes matches and is decoded, otherwise 0. */
234 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
237 if ((insn
& 0x9f000000) == 0x91000000)
242 *rd
= (insn
>> 0) & 0x1f;
243 *rn
= (insn
>> 5) & 0x1f;
244 *imm
= (insn
>> 10) & 0xfff;
245 shift
= (insn
>> 22) & 0x3;
246 op_is_sub
= (insn
>> 30) & 0x1;
264 fprintf_unfiltered (gdb_stdlog
,
265 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
266 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
273 /* Decode an opcode if it represents an ADRP instruction.
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 RD receives the 'rd' field from the decoded instruction.
279 Return 1 if the opcodes matches and is decoded, otherwise 0. */
282 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
284 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
286 *rd
= (insn
>> 0) & 0x1f;
289 fprintf_unfiltered (gdb_stdlog
,
290 "decode: 0x%s 0x%x adrp x%u, #?\n",
291 core_addr_to_string_nz (addr
), insn
, *rd
);
297 /* Decode an opcode if it represents an branch immediate or branch
298 and link immediate instruction.
300 ADDR specifies the address of the opcode.
301 INSN specifies the opcode to test.
302 IS_BL receives the 'op' bit from the decoded instruction.
303 OFFSET receives the immediate offset from the decoded instruction.
305 Return 1 if the opcodes matches and is decoded, otherwise 0. */
308 decode_b (CORE_ADDR addr
, uint32_t insn
, int *is_bl
, int32_t *offset
)
310 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
311 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
312 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
314 *is_bl
= (insn
>> 31) & 0x1;
315 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
318 fprintf_unfiltered (gdb_stdlog
,
319 "decode: 0x%s 0x%x %s 0x%s\n",
320 core_addr_to_string_nz (addr
), insn
,
322 core_addr_to_string_nz (addr
+ *offset
));
329 /* Decode an opcode if it represents a conditional branch instruction.
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 COND receives the branch condition field from the decoded
335 OFFSET receives the immediate offset from the decoded instruction.
337 Return 1 if the opcodes matches and is decoded, otherwise 0. */
340 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
342 /* b.cond 0101 0100 iiii iiii iiii iiii iii0 cccc */
343 if (decode_masked_match (insn
, 0xff000010, 0x54000000))
345 *cond
= (insn
>> 0) & 0xf;
346 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
349 fprintf_unfiltered (gdb_stdlog
,
350 "decode: 0x%s 0x%x b<%u> 0x%s\n",
351 core_addr_to_string_nz (addr
), insn
, *cond
,
352 core_addr_to_string_nz (addr
+ *offset
));
358 /* Decode an opcode if it represents a branch via register instruction.
360 ADDR specifies the address of the opcode.
361 INSN specifies the opcode to test.
362 IS_BLR receives the 'op' bit from the decoded instruction.
363 RN receives the 'rn' field from the decoded instruction.
365 Return 1 if the opcodes matches and is decoded, otherwise 0. */
368 decode_br (CORE_ADDR addr
, uint32_t insn
, int *is_blr
, unsigned *rn
)
370 /* 8 4 0 6 2 8 4 0 */
371 /* blr 110101100011111100000000000rrrrr */
372 /* br 110101100001111100000000000rrrrr */
373 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
375 *is_blr
= (insn
>> 21) & 1;
376 *rn
= (insn
>> 5) & 0x1f;
379 fprintf_unfiltered (gdb_stdlog
,
380 "decode: 0x%s 0x%x %s 0x%x\n",
381 core_addr_to_string_nz (addr
), insn
,
382 *is_blr
? "blr" : "br", *rn
);
389 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
391 ADDR specifies the address of the opcode.
392 INSN specifies the opcode to test.
393 IS64 receives the 'sf' field from the decoded instruction.
394 IS_CBNZ receives the 'op' field from the decoded instruction.
395 RN receives the 'rn' field from the decoded instruction.
396 OFFSET receives the 'imm19' field from the decoded instruction.
398 Return 1 if the opcodes matches and is decoded, otherwise 0. */
401 decode_cb (CORE_ADDR addr
, uint32_t insn
, int *is64
, int *is_cbnz
,
402 unsigned *rn
, int32_t *offset
)
404 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
405 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
406 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
408 *rn
= (insn
>> 0) & 0x1f;
409 *is64
= (insn
>> 31) & 0x1;
410 *is_cbnz
= (insn
>> 24) & 0x1;
411 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
414 fprintf_unfiltered (gdb_stdlog
,
415 "decode: 0x%s 0x%x %s 0x%s\n",
416 core_addr_to_string_nz (addr
), insn
,
417 *is_cbnz
? "cbnz" : "cbz",
418 core_addr_to_string_nz (addr
+ *offset
));
424 /* Decode an opcode if it represents a ERET instruction.
426 ADDR specifies the address of the opcode.
427 INSN specifies the opcode to test.
429 Return 1 if the opcodes matches and is decoded, otherwise 0. */
432 decode_eret (CORE_ADDR addr
, uint32_t insn
)
434 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
435 if (insn
== 0xd69f03e0)
438 fprintf_unfiltered (gdb_stdlog
, "decode: 0x%s 0x%x eret\n",
439 core_addr_to_string_nz (addr
), insn
);
445 /* Decode an opcode if it represents a MOVZ instruction.
447 ADDR specifies the address of the opcode.
448 INSN specifies the opcode to test.
449 RD receives the 'rd' field from the decoded instruction.
451 Return 1 if the opcodes matches and is decoded, otherwise 0. */
454 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
456 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
458 *rd
= (insn
>> 0) & 0x1f;
461 fprintf_unfiltered (gdb_stdlog
,
462 "decode: 0x%s 0x%x movz x%u, #?\n",
463 core_addr_to_string_nz (addr
), insn
, *rd
);
469 /* Decode an opcode if it represents a ORR (shifted register)
472 ADDR specifies the address of the opcode.
473 INSN specifies the opcode to test.
474 RD receives the 'rd' field from the decoded instruction.
475 RN receives the 'rn' field from the decoded instruction.
476 RM receives the 'rm' field from the decoded instruction.
477 IMM receives the 'imm6' field from the decoded instruction.
479 Return 1 if the opcodes matches and is decoded, otherwise 0. */
482 decode_orr_shifted_register_x (CORE_ADDR addr
,
483 uint32_t insn
, unsigned *rd
, unsigned *rn
,
484 unsigned *rm
, int32_t *imm
)
486 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
488 *rd
= (insn
>> 0) & 0x1f;
489 *rn
= (insn
>> 5) & 0x1f;
490 *rm
= (insn
>> 16) & 0x1f;
491 *imm
= (insn
>> 10) & 0x3f;
494 fprintf_unfiltered (gdb_stdlog
,
495 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
496 core_addr_to_string_nz (addr
), insn
, *rd
,
503 /* Decode an opcode if it represents a RET instruction.
505 ADDR specifies the address of the opcode.
506 INSN specifies the opcode to test.
507 RN receives the 'rn' field from the decoded instruction.
509 Return 1 if the opcodes matches and is decoded, otherwise 0. */
512 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
514 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
516 *rn
= (insn
>> 5) & 0x1f;
518 fprintf_unfiltered (gdb_stdlog
,
519 "decode: 0x%s 0x%x ret x%u\n",
520 core_addr_to_string_nz (addr
), insn
, *rn
);
526 /* Decode an opcode if it represents the following instruction:
527 STP rt, rt2, [rn, #imm]
529 ADDR specifies the address of the opcode.
530 INSN specifies the opcode to test.
531 RT1 receives the 'rt' field from the decoded instruction.
532 RT2 receives the 'rt2' field from the decoded instruction.
533 RN receives the 'rn' field from the decoded instruction.
534 IMM receives the 'imm' field from the decoded instruction.
536 Return 1 if the opcodes matches and is decoded, otherwise 0. */
539 decode_stp_offset (CORE_ADDR addr
,
541 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
543 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
545 *rt1
= (insn
>> 0) & 0x1f;
546 *rn
= (insn
>> 5) & 0x1f;
547 *rt2
= (insn
>> 10) & 0x1f;
548 *imm
= extract_signed_bitfield (insn
, 7, 15);
552 fprintf_unfiltered (gdb_stdlog
,
553 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
554 core_addr_to_string_nz (addr
), insn
,
555 *rt1
, *rt2
, *rn
, *imm
);
561 /* Decode an opcode if it represents the following instruction:
562 STP rt, rt2, [rn, #imm]!
564 ADDR specifies the address of the opcode.
565 INSN specifies the opcode to test.
566 RT1 receives the 'rt' field from the decoded instruction.
567 RT2 receives the 'rt2' field from the decoded instruction.
568 RN receives the 'rn' field from the decoded instruction.
569 IMM receives the 'imm' field from the decoded instruction.
571 Return 1 if the opcodes matches and is decoded, otherwise 0. */
574 decode_stp_offset_wb (CORE_ADDR addr
,
576 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
579 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
581 *rt1
= (insn
>> 0) & 0x1f;
582 *rn
= (insn
>> 5) & 0x1f;
583 *rt2
= (insn
>> 10) & 0x1f;
584 *imm
= extract_signed_bitfield (insn
, 7, 15);
588 fprintf_unfiltered (gdb_stdlog
,
589 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
590 core_addr_to_string_nz (addr
), insn
,
591 *rt1
, *rt2
, *rn
, *imm
);
597 /* Decode an opcode if it represents the following instruction:
600 ADDR specifies the address of the opcode.
601 INSN specifies the opcode to test.
602 IS64 receives size field from the decoded instruction.
603 RT receives the 'rt' field from the decoded instruction.
604 RN receives the 'rn' field from the decoded instruction.
605 IMM receives the 'imm' field from the decoded instruction.
607 Return 1 if the opcodes matches and is decoded, otherwise 0. */
610 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
611 unsigned *rn
, int32_t *imm
)
613 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
615 *is64
= (insn
>> 30) & 1;
616 *rt
= (insn
>> 0) & 0x1f;
617 *rn
= (insn
>> 5) & 0x1f;
618 *imm
= extract_signed_bitfield (insn
, 9, 12);
621 fprintf_unfiltered (gdb_stdlog
,
622 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
623 core_addr_to_string_nz (addr
), insn
,
624 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
630 /* Decode an opcode if it represents a TBZ or TBNZ instruction.
632 ADDR specifies the address of the opcode.
633 INSN specifies the opcode to test.
634 IS_TBNZ receives the 'op' field from the decoded instruction.
635 BIT receives the bit position field from the decoded instruction.
636 RT receives 'rt' field from the decoded instruction.
637 IMM receives 'imm' field from the decoded instruction.
639 Return 1 if the opcodes matches and is decoded, otherwise 0. */
642 decode_tb (CORE_ADDR addr
, uint32_t insn
, int *is_tbnz
, unsigned *bit
,
643 unsigned *rt
, int32_t *imm
)
645 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
646 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
647 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
649 *rt
= (insn
>> 0) & 0x1f;
650 *is_tbnz
= (insn
>> 24) & 0x1;
651 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
652 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
655 fprintf_unfiltered (gdb_stdlog
,
656 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
657 core_addr_to_string_nz (addr
), insn
,
658 *is_tbnz
? "tbnz" : "tbz", *rt
, *bit
,
659 core_addr_to_string_nz (addr
+ *imm
));
665 /* Analyze a prologue, looking for a recognizable stack frame
666 and frame pointer. Scan until we encounter a store that could
667 clobber the stack frame unexpectedly, or an unknown instruction. */
670 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
671 CORE_ADDR start
, CORE_ADDR limit
,
672 struct aarch64_prologue_cache
*cache
)
674 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
676 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
677 struct pv_area
*stack
;
678 struct cleanup
*back_to
;
680 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
681 regs
[i
] = pv_register (i
, 0);
682 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
683 back_to
= make_cleanup_free_pv_area (stack
);
685 for (; start
< limit
; start
+= 4)
704 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
706 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
707 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
708 else if (decode_adrp (start
, insn
, &rd
))
709 regs
[rd
] = pv_unknown ();
710 else if (decode_b (start
, insn
, &is_link
, &offset
))
712 /* Stop analysis on branch. */
715 else if (decode_bcond (start
, insn
, &cond
, &offset
))
717 /* Stop analysis on branch. */
720 else if (decode_br (start
, insn
, &is_link
, &rn
))
722 /* Stop analysis on branch. */
725 else if (decode_cb (start
, insn
, &is64
, &is_cbnz
, &rn
, &offset
))
727 /* Stop analysis on branch. */
730 else if (decode_eret (start
, insn
))
732 /* Stop analysis on branch. */
735 else if (decode_movz (start
, insn
, &rd
))
736 regs
[rd
] = pv_unknown ();
738 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
740 if (imm
== 0 && rn
== 31)
747 "aarch64: prologue analysis gave up addr=0x%s "
748 "opcode=0x%x (orr x register)\n",
749 core_addr_to_string_nz (start
),
754 else if (decode_ret (start
, insn
, &rn
))
756 /* Stop analysis on branch. */
759 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
761 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
762 is64
? 8 : 4, regs
[rt
]);
764 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
766 /* If recording this store would invalidate the store area
767 (perhaps because rn is not known) then we should abandon
768 further prologue analysis. */
769 if (pv_area_store_would_trash (stack
,
770 pv_add_constant (regs
[rn
], imm
)))
773 if (pv_area_store_would_trash (stack
,
774 pv_add_constant (regs
[rn
], imm
+ 8)))
777 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
779 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
782 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
784 /* If recording this store would invalidate the store area
785 (perhaps because rn is not known) then we should abandon
786 further prologue analysis. */
787 if (pv_area_store_would_trash (stack
,
788 pv_add_constant (regs
[rn
], imm
)))
791 if (pv_area_store_would_trash (stack
,
792 pv_add_constant (regs
[rn
], imm
+ 8)))
795 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
797 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
799 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
801 else if (decode_tb (start
, insn
, &is_tbnz
, &bit
, &rn
, &offset
))
803 /* Stop analysis on branch. */
809 fprintf_unfiltered (gdb_stdlog
,
810 "aarch64: prologue analysis gave up addr=0x%s"
812 core_addr_to_string_nz (start
), insn
);
819 do_cleanups (back_to
);
823 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
825 /* Frame pointer is fp. Frame size is constant. */
826 cache
->framereg
= AARCH64_FP_REGNUM
;
827 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
829 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
831 /* Try the stack pointer. */
832 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
833 cache
->framereg
= AARCH64_SP_REGNUM
;
837 /* We're just out of luck. We don't know where the frame is. */
838 cache
->framereg
= -1;
839 cache
->framesize
= 0;
842 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
846 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
847 cache
->saved_regs
[i
].addr
= offset
;
850 do_cleanups (back_to
);
854 /* Implement the "skip_prologue" gdbarch method. */
857 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
861 CORE_ADDR func_addr
, limit_pc
;
862 struct symtab_and_line sal
;
864 /* See if we can determine the end of the prologue via the symbol
865 table. If so, then return either PC, or the PC after the
866 prologue, whichever is greater. */
867 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
869 CORE_ADDR post_prologue_pc
870 = skip_prologue_using_sal (gdbarch
, func_addr
);
872 if (post_prologue_pc
!= 0)
873 return max (pc
, post_prologue_pc
);
876 /* Can't determine prologue from the symbol table, need to examine
879 /* Find an upper limit on the function prologue using the debug
880 information. If the debug information could not be used to
881 provide that bound, then use an arbitrary large number as the
883 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
885 limit_pc
= pc
+ 128; /* Magic. */
887 /* Try disassembling prologue. */
888 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
891 /* Scan the function prologue for THIS_FRAME and populate the prologue
895 aarch64_scan_prologue (struct frame_info
*this_frame
,
896 struct aarch64_prologue_cache
*cache
)
898 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
899 CORE_ADDR prologue_start
;
900 CORE_ADDR prologue_end
;
901 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
902 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
904 cache
->prev_pc
= prev_pc
;
906 /* Assume we do not find a frame. */
907 cache
->framereg
= -1;
908 cache
->framesize
= 0;
910 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
913 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
917 /* No line info so use the current PC. */
918 prologue_end
= prev_pc
;
920 else if (sal
.end
< prologue_end
)
922 /* The next line begins after the function end. */
923 prologue_end
= sal
.end
;
926 prologue_end
= min (prologue_end
, prev_pc
);
927 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
934 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
936 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
940 cache
->framereg
= AARCH64_FP_REGNUM
;
941 cache
->framesize
= 16;
942 cache
->saved_regs
[29].addr
= 0;
943 cache
->saved_regs
[30].addr
= 8;
947 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
948 function may throw an exception if the inferior's registers or memory is
952 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
953 struct aarch64_prologue_cache
*cache
)
955 CORE_ADDR unwound_fp
;
958 aarch64_scan_prologue (this_frame
, cache
);
960 if (cache
->framereg
== -1)
963 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
967 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
969 /* Calculate actual addresses of saved registers using offsets
970 determined by aarch64_analyze_prologue. */
971 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
972 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
973 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
975 cache
->func
= get_frame_func (this_frame
);
977 cache
->available_p
= 1;
980 /* Allocate and fill in *THIS_CACHE with information about the prologue of
981 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
982 Return a pointer to the current aarch64_prologue_cache in
985 static struct aarch64_prologue_cache
*
986 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
988 struct aarch64_prologue_cache
*cache
;
990 if (*this_cache
!= NULL
)
993 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
994 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
999 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1001 CATCH (ex
, RETURN_MASK_ERROR
)
1003 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1004 throw_exception (ex
);
1011 /* Implement the "stop_reason" frame_unwind method. */
1013 static enum unwind_stop_reason
1014 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1017 struct aarch64_prologue_cache
*cache
1018 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1020 if (!cache
->available_p
)
1021 return UNWIND_UNAVAILABLE
;
1023 /* Halt the backtrace at "_start". */
1024 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1025 return UNWIND_OUTERMOST
;
1027 /* We've hit a wall, stop. */
1028 if (cache
->prev_sp
== 0)
1029 return UNWIND_OUTERMOST
;
1031 return UNWIND_NO_REASON
;
1034 /* Our frame ID for a normal frame is the current function's starting
1035 PC and the caller's SP when we were called. */
1038 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1039 void **this_cache
, struct frame_id
*this_id
)
1041 struct aarch64_prologue_cache
*cache
1042 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1044 if (!cache
->available_p
)
1045 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1047 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1050 /* Implement the "prev_register" frame_unwind method. */
1052 static struct value
*
1053 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1054 void **this_cache
, int prev_regnum
)
1056 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1057 struct aarch64_prologue_cache
*cache
1058 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1060 /* If we are asked to unwind the PC, then we need to return the LR
1061 instead. The prologue may save PC, but it will point into this
1062 frame's prologue, not the next frame's resume location. */
1063 if (prev_regnum
== AARCH64_PC_REGNUM
)
1067 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1068 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1071 /* SP is generally not saved to the stack, but this frame is
1072 identified by the next frame's stack pointer at the time of the
1073 call. The value was already reconstructed into PREV_SP. */
1079 | | | <- Previous SP
1082 +--| saved fp |<- FP
1086 if (prev_regnum
== AARCH64_SP_REGNUM
)
1087 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1090 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1094 /* AArch64 prologue unwinder. */
1095 struct frame_unwind aarch64_prologue_unwind
=
1098 aarch64_prologue_frame_unwind_stop_reason
,
1099 aarch64_prologue_this_id
,
1100 aarch64_prologue_prev_register
,
1102 default_frame_sniffer
1105 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1106 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1107 Return a pointer to the current aarch64_prologue_cache in
1110 static struct aarch64_prologue_cache
*
1111 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1113 struct aarch64_prologue_cache
*cache
;
1115 if (*this_cache
!= NULL
)
1118 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1119 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1120 *this_cache
= cache
;
1124 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1126 cache
->prev_pc
= get_frame_pc (this_frame
);
1127 cache
->available_p
= 1;
1129 CATCH (ex
, RETURN_MASK_ERROR
)
1131 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1132 throw_exception (ex
);
1139 /* Implement the "stop_reason" frame_unwind method. */
1141 static enum unwind_stop_reason
1142 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1145 struct aarch64_prologue_cache
*cache
1146 = aarch64_make_stub_cache (this_frame
, this_cache
);
1148 if (!cache
->available_p
)
1149 return UNWIND_UNAVAILABLE
;
1151 return UNWIND_NO_REASON
;
1154 /* Our frame ID for a stub frame is the current SP and LR. */
1157 aarch64_stub_this_id (struct frame_info
*this_frame
,
1158 void **this_cache
, struct frame_id
*this_id
)
1160 struct aarch64_prologue_cache
*cache
1161 = aarch64_make_stub_cache (this_frame
, this_cache
);
1163 if (cache
->available_p
)
1164 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1166 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1169 /* Implement the "sniffer" frame_unwind method. */
1172 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1173 struct frame_info
*this_frame
,
1174 void **this_prologue_cache
)
1176 CORE_ADDR addr_in_block
;
1179 addr_in_block
= get_frame_address_in_block (this_frame
);
1180 if (in_plt_section (addr_in_block
)
1181 /* We also use the stub winder if the target memory is unreadable
1182 to avoid having the prologue unwinder trying to read it. */
1183 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1189 /* AArch64 stub unwinder. */
1190 struct frame_unwind aarch64_stub_unwind
=
1193 aarch64_stub_frame_unwind_stop_reason
,
1194 aarch64_stub_this_id
,
1195 aarch64_prologue_prev_register
,
1197 aarch64_stub_unwind_sniffer
1200 /* Return the frame base address of *THIS_FRAME. */
1203 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1205 struct aarch64_prologue_cache
*cache
1206 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1208 return cache
->prev_sp
- cache
->framesize
;
1211 /* AArch64 default frame base information. */
1212 struct frame_base aarch64_normal_base
=
1214 &aarch64_prologue_unwind
,
1215 aarch64_normal_frame_base
,
1216 aarch64_normal_frame_base
,
1217 aarch64_normal_frame_base
1220 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1221 dummy frame. The frame ID's base needs to match the TOS value
1222 saved by save_dummy_frame_tos () and returned from
1223 aarch64_push_dummy_call, and the PC needs to match the dummy
1224 frame's breakpoint. */
1226 static struct frame_id
1227 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1229 return frame_id_build (get_frame_register_unsigned (this_frame
,
1231 get_frame_pc (this_frame
));
1234 /* Implement the "unwind_pc" gdbarch method. */
1237 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1240 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1245 /* Implement the "unwind_sp" gdbarch method. */
1248 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1250 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1253 /* Return the value of the REGNUM register in the previous frame of
1256 static struct value
*
1257 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1258 void **this_cache
, int regnum
)
1260 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1265 case AARCH64_PC_REGNUM
:
1266 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1267 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1270 internal_error (__FILE__
, __LINE__
,
1271 _("Unexpected register %d"), regnum
);
1275 /* Implement the "init_reg" dwarf2_frame_ops method. */
1278 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1279 struct dwarf2_frame_state_reg
*reg
,
1280 struct frame_info
*this_frame
)
1284 case AARCH64_PC_REGNUM
:
1285 reg
->how
= DWARF2_FRAME_REG_FN
;
1286 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1288 case AARCH64_SP_REGNUM
:
1289 reg
->how
= DWARF2_FRAME_REG_CFA
;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1308 /* Return the alignment (in bytes) of the given type. */
1311 aarch64_type_align (struct type
*t
)
1317 t
= check_typedef (t
);
1318 switch (TYPE_CODE (t
))
1321 /* Should never happen. */
1322 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1326 case TYPE_CODE_ENUM
:
1330 case TYPE_CODE_RANGE
:
1331 case TYPE_CODE_BITSTRING
:
1333 case TYPE_CODE_CHAR
:
1334 case TYPE_CODE_BOOL
:
1335 return TYPE_LENGTH (t
);
1337 case TYPE_CODE_ARRAY
:
1338 case TYPE_CODE_COMPLEX
:
1339 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1341 case TYPE_CODE_STRUCT
:
1342 case TYPE_CODE_UNION
:
1344 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1346 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1354 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1355 defined in the AAPCS64 ABI document; otherwise return 0. */
1358 is_hfa (struct type
*ty
)
1360 switch (TYPE_CODE (ty
))
1362 case TYPE_CODE_ARRAY
:
1364 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1365 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1370 case TYPE_CODE_UNION
:
1371 case TYPE_CODE_STRUCT
:
1373 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1375 struct type
*member0_type
;
1377 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1378 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1382 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1384 struct type
*member1_type
;
1386 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1387 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1388 || (TYPE_LENGTH (member0_type
)
1389 != TYPE_LENGTH (member1_type
)))
1405 /* AArch64 function call information structure. */
1406 struct aarch64_call_info
1408 /* the current argument number. */
1411 /* The next general purpose register number, equivalent to NGRN as
1412 described in the AArch64 Procedure Call Standard. */
1415 /* The next SIMD and floating point register number, equivalent to
1416 NSRN as described in the AArch64 Procedure Call Standard. */
1419 /* The next stacked argument address, equivalent to NSAA as
1420 described in the AArch64 Procedure Call Standard. */
1423 /* Stack item vector. */
1424 VEC(stack_item_t
) *si
;
1427 /* Pass a value in a sequence of consecutive X registers. The caller
1428 is responsbile for ensuring sufficient registers are available. */
1431 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1432 struct aarch64_call_info
*info
, struct type
*type
,
1433 const bfd_byte
*buf
)
1435 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1436 int len
= TYPE_LENGTH (type
);
1437 enum type_code typecode
= TYPE_CODE (type
);
1438 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1444 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1445 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1449 /* Adjust sub-word struct/union args when big-endian. */
1450 if (byte_order
== BFD_ENDIAN_BIG
1451 && partial_len
< X_REGISTER_SIZE
1452 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1453 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1456 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
1458 gdbarch_register_name (gdbarch
, regnum
),
1459 phex (regval
, X_REGISTER_SIZE
));
1460 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1467 /* Attempt to marshall a value in a V register. Return 1 if
1468 successful, or 0 if insufficient registers are available. This
1469 function, unlike the equivalent pass_in_x() function does not
1470 handle arguments spread across multiple registers. */
1473 pass_in_v (struct gdbarch
*gdbarch
,
1474 struct regcache
*regcache
,
1475 struct aarch64_call_info
*info
,
1476 const bfd_byte
*buf
)
1480 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1481 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1486 regcache_cooked_write (regcache
, regnum
, buf
);
1488 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s\n",
1490 gdbarch_register_name (gdbarch
, regnum
));
1497 /* Marshall an argument onto the stack. */
1500 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1501 const bfd_byte
*buf
)
1503 int len
= TYPE_LENGTH (type
);
1509 align
= aarch64_type_align (type
);
1511 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1512 Natural alignment of the argument's type. */
1513 align
= align_up (align
, 8);
1515 /* The AArch64 PCS requires at most doubleword alignment. */
1520 fprintf_unfiltered (gdb_stdlog
, "arg %d len=%d @ sp + %d\n",
1521 info
->argnum
, len
, info
->nsaa
);
1525 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1528 if (info
->nsaa
& (align
- 1))
1530 /* Push stack alignment padding. */
1531 int pad
= align
- (info
->nsaa
& (align
- 1));
1536 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1541 /* Marshall an argument into a sequence of one or more consecutive X
1542 registers or, if insufficient X registers are available then onto
1546 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1547 struct aarch64_call_info
*info
, struct type
*type
,
1548 const bfd_byte
*buf
)
1550 int len
= TYPE_LENGTH (type
);
1551 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1553 /* PCS C.13 - Pass in registers if we have enough spare */
1554 if (info
->ngrn
+ nregs
<= 8)
1556 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1557 info
->ngrn
+= nregs
;
1562 pass_on_stack (info
, type
, buf
);
1566 /* Pass a value in a V register, or on the stack if insufficient are
1570 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1571 struct regcache
*regcache
,
1572 struct aarch64_call_info
*info
,
1574 const bfd_byte
*buf
)
1576 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1577 pass_on_stack (info
, type
, buf
);
1580 /* Implement the "push_dummy_call" gdbarch method. */
1583 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1584 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1586 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1587 CORE_ADDR struct_addr
)
1593 struct aarch64_call_info info
;
1594 struct type
*func_type
;
1595 struct type
*return_type
;
1596 int lang_struct_return
;
1598 memset (&info
, 0, sizeof (info
));
1600 /* We need to know what the type of the called function is in order
1601 to determine the number of named/anonymous arguments for the
1602 actual argument placement, and the return type in order to handle
1603 return value correctly.
1605 The generic code above us views the decision of return in memory
1606 or return in registers as a two stage processes. The language
1607 handler is consulted first and may decide to return in memory (eg
1608 class with copy constructor returned by value), this will cause
1609 the generic code to allocate space AND insert an initial leading
1612 If the language code does not decide to pass in memory then the
1613 target code is consulted.
1615 If the language code decides to pass in memory we want to move
1616 the pointer inserted as the initial argument from the argument
1617 list and into X8, the conventional AArch64 struct return pointer
1620 This is slightly awkward, ideally the flag "lang_struct_return"
1621 would be passed to the targets implementation of push_dummy_call.
1622 Rather that change the target interface we call the language code
1623 directly ourselves. */
1625 func_type
= check_typedef (value_type (function
));
1627 /* Dereference function pointer types. */
1628 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1629 func_type
= TYPE_TARGET_TYPE (func_type
);
1631 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1632 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1634 /* If language_pass_by_reference () returned true we will have been
1635 given an additional initial argument, a hidden pointer to the
1636 return slot in memory. */
1637 return_type
= TYPE_TARGET_TYPE (func_type
);
1638 lang_struct_return
= language_pass_by_reference (return_type
);
1640 /* Set the return address. For the AArch64, the return breakpoint
1641 is always at BP_ADDR. */
1642 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1644 /* If we were given an initial argument for the return slot because
1645 lang_struct_return was true, lose it. */
1646 if (lang_struct_return
)
1652 /* The struct_return pointer occupies X8. */
1653 if (struct_return
|| lang_struct_return
)
1656 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = 0x%s\n",
1657 gdbarch_register_name
1659 AARCH64_STRUCT_RETURN_REGNUM
),
1660 paddress (gdbarch
, struct_addr
));
1661 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1665 for (argnum
= 0; argnum
< nargs
; argnum
++)
1667 struct value
*arg
= args
[argnum
];
1668 struct type
*arg_type
;
1671 arg_type
= check_typedef (value_type (arg
));
1672 len
= TYPE_LENGTH (arg_type
);
1674 switch (TYPE_CODE (arg_type
))
1677 case TYPE_CODE_BOOL
:
1678 case TYPE_CODE_CHAR
:
1679 case TYPE_CODE_RANGE
:
1680 case TYPE_CODE_ENUM
:
1683 /* Promote to 32 bit integer. */
1684 if (TYPE_UNSIGNED (arg_type
))
1685 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1687 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1688 arg
= value_cast (arg_type
, arg
);
1690 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1691 value_contents (arg
));
1694 case TYPE_CODE_COMPLEX
:
1697 const bfd_byte
*buf
= value_contents (arg
);
1698 struct type
*target_type
=
1699 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1701 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1702 pass_in_v (gdbarch
, regcache
, &info
,
1703 buf
+ TYPE_LENGTH (target_type
));
1708 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1712 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1713 value_contents (arg
));
1716 case TYPE_CODE_STRUCT
:
1717 case TYPE_CODE_ARRAY
:
1718 case TYPE_CODE_UNION
:
1719 if (is_hfa (arg_type
))
1721 int elements
= TYPE_NFIELDS (arg_type
);
1723 /* Homogeneous Aggregates */
1724 if (info
.nsrn
+ elements
< 8)
1728 for (i
= 0; i
< elements
; i
++)
1730 /* We know that we have sufficient registers
1731 available therefore this will never fallback
1733 struct value
*field
=
1734 value_primitive_field (arg
, 0, i
, arg_type
);
1735 struct type
*field_type
=
1736 check_typedef (value_type (field
));
1738 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1739 value_contents_writeable (field
));
1745 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1750 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1751 invisible reference. */
1753 /* Allocate aligned storage. */
1754 sp
= align_down (sp
- len
, 16);
1756 /* Write the real data into the stack. */
1757 write_memory (sp
, value_contents (arg
), len
);
1759 /* Construct the indirection. */
1760 arg_type
= lookup_pointer_type (arg_type
);
1761 arg
= value_from_pointer (arg_type
, sp
);
1762 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1763 value_contents (arg
));
1766 /* PCS C.15 / C.18 multiple values pass. */
1767 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1768 value_contents (arg
));
1772 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1773 value_contents (arg
));
1778 /* Make sure stack retains 16 byte alignment. */
1780 sp
-= 16 - (info
.nsaa
& 15);
1782 while (!VEC_empty (stack_item_t
, info
.si
))
1784 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1787 write_memory (sp
, si
->data
, si
->len
);
1788 VEC_pop (stack_item_t
, info
.si
);
1791 VEC_free (stack_item_t
, info
.si
);
1793 /* Finally, update the SP register. */
1794 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1799 /* Implement the "frame_align" gdbarch method. */
1802 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1804 /* Align the stack to sixteen bytes. */
1805 return sp
& ~(CORE_ADDR
) 15;
1808 /* Return the type for an AdvSISD Q register. */
1810 static struct type
*
1811 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1813 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1815 if (tdep
->vnq_type
== NULL
)
1820 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1823 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1824 append_composite_type_field (t
, "u", elem
);
1826 elem
= builtin_type (gdbarch
)->builtin_int128
;
1827 append_composite_type_field (t
, "s", elem
);
1832 return tdep
->vnq_type
;
1835 /* Return the type for an AdvSISD D register. */
1837 static struct type
*
1838 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1840 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1842 if (tdep
->vnd_type
== NULL
)
1847 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1850 elem
= builtin_type (gdbarch
)->builtin_double
;
1851 append_composite_type_field (t
, "f", elem
);
1853 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1854 append_composite_type_field (t
, "u", elem
);
1856 elem
= builtin_type (gdbarch
)->builtin_int64
;
1857 append_composite_type_field (t
, "s", elem
);
1862 return tdep
->vnd_type
;
1865 /* Return the type for an AdvSISD S register. */
1867 static struct type
*
1868 aarch64_vns_type (struct gdbarch
*gdbarch
)
1870 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1872 if (tdep
->vns_type
== NULL
)
1877 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1880 elem
= builtin_type (gdbarch
)->builtin_float
;
1881 append_composite_type_field (t
, "f", elem
);
1883 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1884 append_composite_type_field (t
, "u", elem
);
1886 elem
= builtin_type (gdbarch
)->builtin_int32
;
1887 append_composite_type_field (t
, "s", elem
);
1892 return tdep
->vns_type
;
1895 /* Return the type for an AdvSISD H register. */
1897 static struct type
*
1898 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1900 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1902 if (tdep
->vnh_type
== NULL
)
1907 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1910 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1911 append_composite_type_field (t
, "u", elem
);
1913 elem
= builtin_type (gdbarch
)->builtin_int16
;
1914 append_composite_type_field (t
, "s", elem
);
1919 return tdep
->vnh_type
;
1922 /* Return the type for an AdvSISD B register. */
1924 static struct type
*
1925 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1927 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1929 if (tdep
->vnb_type
== NULL
)
1934 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1937 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1938 append_composite_type_field (t
, "u", elem
);
1940 elem
= builtin_type (gdbarch
)->builtin_int8
;
1941 append_composite_type_field (t
, "s", elem
);
1946 return tdep
->vnb_type
;
1949 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1952 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1954 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1955 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1957 if (reg
== AARCH64_DWARF_SP
)
1958 return AARCH64_SP_REGNUM
;
1960 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1961 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1967 /* Implement the "print_insn" gdbarch method. */
1970 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1972 info
->symbols
= NULL
;
1973 return print_insn_aarch64 (memaddr
, info
);
1976 /* AArch64 BRK software debug mode instruction.
1977 Note that AArch64 code is always little-endian.
1978 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1979 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1981 /* Implement the "breakpoint_from_pc" gdbarch method. */
1983 static const gdb_byte
*
1984 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1987 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1989 *lenptr
= sizeof (aarch64_default_breakpoint
);
1990 return aarch64_default_breakpoint
;
1993 /* Extract from an array REGS containing the (raw) register state a
1994 function return value of type TYPE, and copy that, in virtual
1995 format, into VALBUF. */
1998 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2001 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2002 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2004 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2006 bfd_byte buf
[V_REGISTER_SIZE
];
2007 int len
= TYPE_LENGTH (type
);
2009 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
2010 memcpy (valbuf
, buf
, len
);
2012 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2013 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2014 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2015 || TYPE_CODE (type
) == TYPE_CODE_PTR
2016 || TYPE_CODE (type
) == TYPE_CODE_REF
2017 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2019 /* If the the type is a plain integer, then the access is
2020 straight-forward. Otherwise we have to play around a bit
2022 int len
= TYPE_LENGTH (type
);
2023 int regno
= AARCH64_X0_REGNUM
;
2028 /* By using store_unsigned_integer we avoid having to do
2029 anything special for small big-endian values. */
2030 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2031 store_unsigned_integer (valbuf
,
2032 (len
> X_REGISTER_SIZE
2033 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2034 len
-= X_REGISTER_SIZE
;
2035 valbuf
+= X_REGISTER_SIZE
;
2038 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
2040 int regno
= AARCH64_V0_REGNUM
;
2041 bfd_byte buf
[V_REGISTER_SIZE
];
2042 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
2043 int len
= TYPE_LENGTH (target_type
);
2045 regcache_cooked_read (regs
, regno
, buf
);
2046 memcpy (valbuf
, buf
, len
);
2048 regcache_cooked_read (regs
, regno
+ 1, buf
);
2049 memcpy (valbuf
, buf
, len
);
2052 else if (is_hfa (type
))
2054 int elements
= TYPE_NFIELDS (type
);
2055 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2056 int len
= TYPE_LENGTH (member_type
);
2059 for (i
= 0; i
< elements
; i
++)
2061 int regno
= AARCH64_V0_REGNUM
+ i
;
2062 bfd_byte buf
[X_REGISTER_SIZE
];
2065 fprintf_unfiltered (gdb_stdlog
,
2066 "read HFA return value element %d from %s\n",
2068 gdbarch_register_name (gdbarch
, regno
));
2069 regcache_cooked_read (regs
, regno
, buf
);
2071 memcpy (valbuf
, buf
, len
);
2077 /* For a structure or union the behaviour is as if the value had
2078 been stored to word-aligned memory and then loaded into
2079 registers with 64-bit load instruction(s). */
2080 int len
= TYPE_LENGTH (type
);
2081 int regno
= AARCH64_X0_REGNUM
;
2082 bfd_byte buf
[X_REGISTER_SIZE
];
2086 regcache_cooked_read (regs
, regno
++, buf
);
2087 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2088 len
-= X_REGISTER_SIZE
;
2089 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
2129 /* Write into appropriate registers a function return value of type
2130 TYPE, given in virtual format. */
2133 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2134 const gdb_byte
*valbuf
)
2136 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2137 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2139 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2141 bfd_byte buf
[V_REGISTER_SIZE
];
2142 int len
= TYPE_LENGTH (type
);
2144 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2145 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2147 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2148 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2149 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2150 || TYPE_CODE (type
) == TYPE_CODE_PTR
2151 || TYPE_CODE (type
) == TYPE_CODE_REF
2152 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2154 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2156 /* Values of one word or less are zero/sign-extended and
2158 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2159 LONGEST val
= unpack_long (type
, valbuf
);
2161 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2162 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2166 /* Integral values greater than one word are stored in
2167 consecutive registers starting with r0. This will always
2168 be a multiple of the regiser size. */
2169 int len
= TYPE_LENGTH (type
);
2170 int regno
= AARCH64_X0_REGNUM
;
2174 regcache_cooked_write (regs
, regno
++, valbuf
);
2175 len
-= X_REGISTER_SIZE
;
2176 valbuf
+= X_REGISTER_SIZE
;
2180 else if (is_hfa (type
))
2182 int elements
= TYPE_NFIELDS (type
);
2183 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2184 int len
= TYPE_LENGTH (member_type
);
2187 for (i
= 0; i
< elements
; i
++)
2189 int regno
= AARCH64_V0_REGNUM
+ i
;
2190 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2193 fprintf_unfiltered (gdb_stdlog
,
2194 "write HFA return value element %d to %s\n",
2196 gdbarch_register_name (gdbarch
, regno
));
2198 memcpy (tmpbuf
, valbuf
, len
);
2199 regcache_cooked_write (regs
, regno
, tmpbuf
);
2205 /* For a structure or union the behaviour is as if the value had
2206 been stored to word-aligned memory and then loaded into
2207 registers with 64-bit load instruction(s). */
2208 int len
= TYPE_LENGTH (type
);
2209 int regno
= AARCH64_X0_REGNUM
;
2210 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2214 memcpy (tmpbuf
, valbuf
,
2215 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2216 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2217 len
-= X_REGISTER_SIZE
;
2218 valbuf
+= X_REGISTER_SIZE
;
2223 /* Implement the "return_value" gdbarch method. */
2225 static enum return_value_convention
2226 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2227 struct type
*valtype
, struct regcache
*regcache
,
2228 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2230 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2232 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2233 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2234 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2236 if (aarch64_return_in_memory (gdbarch
, valtype
))
2239 fprintf_unfiltered (gdb_stdlog
, "return value in memory\n");
2240 return RETURN_VALUE_STRUCT_CONVENTION
;
2245 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2248 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2251 fprintf_unfiltered (gdb_stdlog
, "return value in registers\n");
2253 return RETURN_VALUE_REGISTER_CONVENTION
;
2256 /* Implement the "get_longjmp_target" gdbarch method. */
2259 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2262 gdb_byte buf
[X_REGISTER_SIZE
];
2263 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2264 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2265 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2267 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2269 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2273 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2277 /* Implement the "gen_return_address" gdbarch method. */
2280 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2281 struct agent_expr
*ax
, struct axs_value
*value
,
2284 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2285 value
->kind
= axs_lvalue_register
;
2286 value
->u
.reg
= AARCH64_LR_REGNUM
;
2290 /* Return the pseudo register name corresponding to register regnum. */
2293 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2295 static const char *const q_name
[] =
2297 "q0", "q1", "q2", "q3",
2298 "q4", "q5", "q6", "q7",
2299 "q8", "q9", "q10", "q11",
2300 "q12", "q13", "q14", "q15",
2301 "q16", "q17", "q18", "q19",
2302 "q20", "q21", "q22", "q23",
2303 "q24", "q25", "q26", "q27",
2304 "q28", "q29", "q30", "q31",
2307 static const char *const d_name
[] =
2309 "d0", "d1", "d2", "d3",
2310 "d4", "d5", "d6", "d7",
2311 "d8", "d9", "d10", "d11",
2312 "d12", "d13", "d14", "d15",
2313 "d16", "d17", "d18", "d19",
2314 "d20", "d21", "d22", "d23",
2315 "d24", "d25", "d26", "d27",
2316 "d28", "d29", "d30", "d31",
2319 static const char *const s_name
[] =
2321 "s0", "s1", "s2", "s3",
2322 "s4", "s5", "s6", "s7",
2323 "s8", "s9", "s10", "s11",
2324 "s12", "s13", "s14", "s15",
2325 "s16", "s17", "s18", "s19",
2326 "s20", "s21", "s22", "s23",
2327 "s24", "s25", "s26", "s27",
2328 "s28", "s29", "s30", "s31",
2331 static const char *const h_name
[] =
2333 "h0", "h1", "h2", "h3",
2334 "h4", "h5", "h6", "h7",
2335 "h8", "h9", "h10", "h11",
2336 "h12", "h13", "h14", "h15",
2337 "h16", "h17", "h18", "h19",
2338 "h20", "h21", "h22", "h23",
2339 "h24", "h25", "h26", "h27",
2340 "h28", "h29", "h30", "h31",
2343 static const char *const b_name
[] =
2345 "b0", "b1", "b2", "b3",
2346 "b4", "b5", "b6", "b7",
2347 "b8", "b9", "b10", "b11",
2348 "b12", "b13", "b14", "b15",
2349 "b16", "b17", "b18", "b19",
2350 "b20", "b21", "b22", "b23",
2351 "b24", "b25", "b26", "b27",
2352 "b28", "b29", "b30", "b31",
2355 regnum
-= gdbarch_num_regs (gdbarch
);
2357 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2358 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2360 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2361 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2363 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2364 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2366 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2367 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2369 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2370 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2372 internal_error (__FILE__
, __LINE__
,
2373 _("aarch64_pseudo_register_name: bad register number %d"),
2377 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2379 static struct type
*
2380 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2382 regnum
-= gdbarch_num_regs (gdbarch
);
2384 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2385 return aarch64_vnq_type (gdbarch
);
2387 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2388 return aarch64_vnd_type (gdbarch
);
2390 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2391 return aarch64_vns_type (gdbarch
);
2393 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2394 return aarch64_vnh_type (gdbarch
);
2396 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2397 return aarch64_vnb_type (gdbarch
);
2399 internal_error (__FILE__
, __LINE__
,
2400 _("aarch64_pseudo_register_type: bad register number %d"),
2404 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2407 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2408 struct reggroup
*group
)
2410 regnum
-= gdbarch_num_regs (gdbarch
);
2412 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2413 return group
== all_reggroup
|| group
== vector_reggroup
;
2414 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2415 return (group
== all_reggroup
|| group
== vector_reggroup
2416 || group
== float_reggroup
);
2417 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2418 return (group
== all_reggroup
|| group
== vector_reggroup
2419 || group
== float_reggroup
);
2420 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2421 return group
== all_reggroup
|| group
== vector_reggroup
;
2422 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2423 return group
== all_reggroup
|| group
== vector_reggroup
;
2425 return group
== all_reggroup
;
2428 /* Implement the "pseudo_register_read_value" gdbarch method. */
2430 static struct value
*
2431 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2432 struct regcache
*regcache
,
2435 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2436 struct value
*result_value
;
2439 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2440 VALUE_LVAL (result_value
) = lval_register
;
2441 VALUE_REGNUM (result_value
) = regnum
;
2442 buf
= value_contents_raw (result_value
);
2444 regnum
-= gdbarch_num_regs (gdbarch
);
2446 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2448 enum register_status status
;
2451 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2452 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2453 if (status
!= REG_VALID
)
2454 mark_value_bytes_unavailable (result_value
, 0,
2455 TYPE_LENGTH (value_type (result_value
)));
2457 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2458 return result_value
;
2461 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2463 enum register_status status
;
2466 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2467 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2468 if (status
!= REG_VALID
)
2469 mark_value_bytes_unavailable (result_value
, 0,
2470 TYPE_LENGTH (value_type (result_value
)));
2472 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2473 return result_value
;
2476 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2478 enum register_status status
;
2481 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2482 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2483 if (status
!= REG_VALID
)
2484 mark_value_bytes_unavailable (result_value
, 0,
2485 TYPE_LENGTH (value_type (result_value
)));
2487 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2488 return result_value
;
2491 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2493 enum register_status status
;
2496 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2497 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2498 if (status
!= REG_VALID
)
2499 mark_value_bytes_unavailable (result_value
, 0,
2500 TYPE_LENGTH (value_type (result_value
)));
2502 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2503 return result_value
;
2506 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2508 enum register_status status
;
2511 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2512 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2513 if (status
!= REG_VALID
)
2514 mark_value_bytes_unavailable (result_value
, 0,
2515 TYPE_LENGTH (value_type (result_value
)));
2517 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2518 return result_value
;
2521 gdb_assert_not_reached ("regnum out of bound");
2524 /* Implement the "pseudo_register_write" gdbarch method. */
2527 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2528 int regnum
, const gdb_byte
*buf
)
2530 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2532 /* Ensure the register buffer is zero, we want gdb writes of the
2533 various 'scalar' pseudo registers to behavior like architectural
2534 writes, register width bytes are written the remainder are set to
2536 memset (reg_buf
, 0, sizeof (reg_buf
));
2538 regnum
-= gdbarch_num_regs (gdbarch
);
2540 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2542 /* pseudo Q registers */
2545 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2546 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2547 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2551 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2553 /* pseudo D registers */
2556 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2557 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2558 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2562 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2566 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2567 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2568 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2572 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2574 /* pseudo H registers */
2577 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2578 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2579 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2583 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2585 /* pseudo B registers */
2588 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2589 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2590 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2594 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.  BATON is a pointer to the raw
   register number of the alias (stored in aarch64_register_aliases);
   return that register's value in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2608 /* Implement the "software_single_step" gdbarch method, needed to
2609 single step through atomic sequences on AArch64. */
2612 aarch64_software_single_step (struct frame_info
*frame
)
2614 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2615 struct address_space
*aspace
= get_frame_address_space (frame
);
2616 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2617 const int insn_size
= 4;
2618 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2619 CORE_ADDR pc
= get_frame_pc (frame
);
2620 CORE_ADDR breaks
[2] = { -1, -1 };
2622 CORE_ADDR closing_insn
= 0;
2623 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2624 byte_order_for_code
);
2627 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2628 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2630 /* Look for a Load Exclusive instruction which begins the sequence. */
2631 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2634 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2640 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2641 byte_order_for_code
);
2643 /* Check if the instruction is a conditional branch. */
2644 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2646 if (bc_insn_count
>= 1)
2649 /* It is, so we'll try to set a breakpoint at the destination. */
2650 breaks
[1] = loc
+ offset
;
2656 /* Look for the Store Exclusive which closes the atomic sequence. */
2657 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2664 /* We didn't find a closing Store Exclusive instruction, fall back. */
2668 /* Insert breakpoint after the end of the atomic sequence. */
2669 breaks
[0] = loc
+ insn_size
;
2671 /* Check for duplicated breakpoints, and also check that the second
2672 breakpoint is not within the atomic sequence. */
2674 && (breaks
[1] == breaks
[0]
2675 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2676 last_breakpoint
= 0;
2678 /* Insert the breakpoint at the end of the sequence, and one at the
2679 destination of the conditional branch, if it exists. */
2680 for (index
= 0; index
<= last_breakpoint
; index
++)
2681 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2686 /* Initialize the current architecture based on INFO. If possible,
2687 re-use an architecture from ARCHES, which is a list of
2688 architectures already created during this debugging session.
2690 Called e.g. at program startup, when reading a core file, and when
2691 reading a binary file. */
2693 static struct gdbarch
*
2694 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2696 struct gdbarch_tdep
*tdep
;
2697 struct gdbarch
*gdbarch
;
2698 struct gdbarch_list
*best_arch
;
2699 struct tdesc_arch_data
*tdesc_data
= NULL
;
2700 const struct target_desc
*tdesc
= info
.target_desc
;
2702 int have_fpa_registers
= 1;
2704 const struct tdesc_feature
*feature
;
2706 int num_pseudo_regs
= 0;
2708 /* Ensure we always have a target descriptor. */
2709 if (!tdesc_has_registers (tdesc
))
2710 tdesc
= tdesc_aarch64
;
2714 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2716 if (feature
== NULL
)
2719 tdesc_data
= tdesc_data_alloc ();
2721 /* Validate the descriptor provides the mandatory core R registers
2722 and allocate their numbers. */
2723 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2725 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2726 aarch64_r_register_names
[i
]);
2728 num_regs
= AARCH64_X0_REGNUM
+ i
;
2730 /* Look for the V registers. */
2731 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2734 /* Validate the descriptor provides the mandatory V registers
2735 and allocate their numbers. */
2736 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2738 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2739 aarch64_v_register_names
[i
]);
2741 num_regs
= AARCH64_V0_REGNUM
+ i
;
2743 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2744 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2745 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2746 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2747 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2752 tdesc_data_cleanup (tdesc_data
);
2756 /* AArch64 code is always little-endian. */
2757 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2759 /* If there is already a candidate, use it. */
2760 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2762 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2764 /* Found a match. */
2768 if (best_arch
!= NULL
)
2770 if (tdesc_data
!= NULL
)
2771 tdesc_data_cleanup (tdesc_data
);
2772 return best_arch
->gdbarch
;
2775 tdep
= XCNEW (struct gdbarch_tdep
);
2776 gdbarch
= gdbarch_alloc (&info
, tdep
);
2778 /* This should be low enough for everything. */
2779 tdep
->lowest_pc
= 0x20;
2780 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2781 tdep
->jb_elt_size
= 8;
2783 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2784 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2786 /* Frame handling. */
2787 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2788 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2789 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2791 /* Advance PC across function entry code. */
2792 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2794 /* The stack grows downward. */
2795 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2797 /* Breakpoint manipulation. */
2798 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2799 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2800 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2802 /* Information about registers, etc. */
2803 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2804 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2805 set_gdbarch_num_regs (gdbarch
, num_regs
);
2807 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2808 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2809 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2810 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2811 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2812 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2813 aarch64_pseudo_register_reggroup_p
);
2816 set_gdbarch_short_bit (gdbarch
, 16);
2817 set_gdbarch_int_bit (gdbarch
, 32);
2818 set_gdbarch_float_bit (gdbarch
, 32);
2819 set_gdbarch_double_bit (gdbarch
, 64);
2820 set_gdbarch_long_double_bit (gdbarch
, 128);
2821 set_gdbarch_long_bit (gdbarch
, 64);
2822 set_gdbarch_long_long_bit (gdbarch
, 64);
2823 set_gdbarch_ptr_bit (gdbarch
, 64);
2824 set_gdbarch_char_signed (gdbarch
, 0);
2825 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2826 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2827 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2829 /* Internal <-> external register number maps. */
2830 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2832 /* Returning results. */
2833 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2836 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2838 /* Virtual tables. */
2839 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2841 /* Hook in the ABI-specific overrides, if they have been registered. */
2842 info
.target_desc
= tdesc
;
2843 info
.tdep_info
= (void *) tdesc_data
;
2844 gdbarch_init_osabi (info
, gdbarch
);
2846 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2848 /* Add some default predicates. */
2849 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2850 dwarf2_append_unwinders (gdbarch
);
2851 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2853 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2855 /* Now we have tuned the configuration, set a few final things,
2856 based on what the OS ABI has told us. */
2858 if (tdep
->jb_pc
>= 0)
2859 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2861 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
2863 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2865 /* Add standard register aliases. */
2866 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2867 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2868 value_of_aarch64_user_reg
,
2869 &aarch64_register_aliases
[i
].regnum
);
2875 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2877 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2882 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2883 paddress (gdbarch
, tdep
->lowest_pc
));
2886 /* Suppress warning from -Wmissing-prototypes. */
2887 extern initialize_file_ftype _initialize_aarch64_tdep
;
2890 _initialize_aarch64_tdep (void)
2892 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2895 initialize_tdesc_aarch64 ();
2897 /* Debug this file's internals. */
2898 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2899 Set AArch64 debugging."), _("\
2900 Show AArch64 debugging."), _("\
2901 When on, AArch64 specific debugging is enabled."),
2904 &setdebuglist
, &showdebuglist
);
2907 /* AArch64 process record-replay related structures, defines etc. */
2909 #define submask(x) ((1L << ((x) + 1)) - 1)
2910 #define bit(obj,st) (((obj) >> (st)) & 1)
2911 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2913 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2916 unsigned int reg_len = LENGTH; \
2919 REGS = XNEWVEC (uint32_t, reg_len); \
2920 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2925 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2928 unsigned int mem_len = LENGTH; \
2931 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2932 memcpy(&MEMS->len, &RECORD_BUF[0], \
2933 sizeof(struct aarch64_mem_r) * LENGTH); \
2938 /* AArch64 record/replay structures and enumerations. */
2940 struct aarch64_mem_r
2942 uint64_t len
; /* Record length. */
2943 uint64_t addr
; /* Memory address. */
2946 enum aarch64_record_result
2948 AARCH64_RECORD_SUCCESS
,
2949 AARCH64_RECORD_FAILURE
,
2950 AARCH64_RECORD_UNSUPPORTED
,
2951 AARCH64_RECORD_UNKNOWN
2954 typedef struct insn_decode_record_t
2956 struct gdbarch
*gdbarch
;
2957 struct regcache
*regcache
;
2958 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2959 uint32_t aarch64_insn
; /* Insn to be recorded. */
2960 uint32_t mem_rec_count
; /* Count of memory records. */
2961 uint32_t reg_rec_count
; /* Count of register records. */
2962 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2963 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2964 } insn_decode_record
;
2966 /* Record handler for data processing - register instructions. */
2969 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2971 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2972 uint32_t record_buf
[4];
2974 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2975 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2976 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2978 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2982 /* Logical (shifted register). */
2983 if (insn_bits24_27
== 0x0a)
2984 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2986 else if (insn_bits24_27
== 0x0b)
2987 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2989 return AARCH64_RECORD_UNKNOWN
;
2991 record_buf
[0] = reg_rd
;
2992 aarch64_insn_r
->reg_rec_count
= 1;
2994 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2998 if (insn_bits24_27
== 0x0b)
3000 /* Data-processing (3 source). */
3001 record_buf
[0] = reg_rd
;
3002 aarch64_insn_r
->reg_rec_count
= 1;
3004 else if (insn_bits24_27
== 0x0a)
3006 if (insn_bits21_23
== 0x00)
3008 /* Add/subtract (with carry). */
3009 record_buf
[0] = reg_rd
;
3010 aarch64_insn_r
->reg_rec_count
= 1;
3011 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3013 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3014 aarch64_insn_r
->reg_rec_count
= 2;
3017 else if (insn_bits21_23
== 0x02)
3019 /* Conditional compare (register) and conditional compare
3020 (immediate) instructions. */
3021 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3022 aarch64_insn_r
->reg_rec_count
= 1;
3024 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3026 /* CConditional select. */
3027 /* Data-processing (2 source). */
3028 /* Data-processing (1 source). */
3029 record_buf
[0] = reg_rd
;
3030 aarch64_insn_r
->reg_rec_count
= 1;
3033 return AARCH64_RECORD_UNKNOWN
;
3037 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3039 return AARCH64_RECORD_SUCCESS
;
3042 /* Record handler for data processing - immediate instructions. */
3045 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3047 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
3048 uint32_t record_buf
[4];
3050 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3051 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3052 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3053 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3055 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3056 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3057 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3059 record_buf
[0] = reg_rd
;
3060 aarch64_insn_r
->reg_rec_count
= 1;
3062 else if (insn_bits24_27
== 0x01)
3064 /* Add/Subtract (immediate). */
3065 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3066 record_buf
[0] = reg_rd
;
3067 aarch64_insn_r
->reg_rec_count
= 1;
3069 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3071 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3073 /* Logical (immediate). */
3074 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3075 record_buf
[0] = reg_rd
;
3076 aarch64_insn_r
->reg_rec_count
= 1;
3078 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3081 return AARCH64_RECORD_UNKNOWN
;
3083 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3085 return AARCH64_RECORD_SUCCESS
;
3088 /* Record handler for branch, exception generation and system instructions. */
3091 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3093 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3094 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3095 uint32_t record_buf
[4];
3097 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3098 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3099 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3101 if (insn_bits28_31
== 0x0d)
3103 /* Exception generation instructions. */
3104 if (insn_bits24_27
== 0x04)
3106 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3107 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3108 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3110 ULONGEST svc_number
;
3112 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3114 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3118 return AARCH64_RECORD_UNSUPPORTED
;
3120 /* System instructions. */
3121 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3123 uint32_t reg_rt
, reg_crn
;
3125 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3126 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3128 /* Record rt in case of sysl and mrs instructions. */
3129 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3131 record_buf
[0] = reg_rt
;
3132 aarch64_insn_r
->reg_rec_count
= 1;
3134 /* Record cpsr for hint and msr(immediate) instructions. */
3135 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3137 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3138 aarch64_insn_r
->reg_rec_count
= 1;
3141 /* Unconditional branch (register). */
3142 else if((insn_bits24_27
& 0x0e) == 0x06)
3144 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3145 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3146 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3149 return AARCH64_RECORD_UNKNOWN
;
3151 /* Unconditional branch (immediate). */
3152 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3154 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3155 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3156 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3159 /* Compare & branch (immediate), Test & branch (immediate) and
3160 Conditional branch (immediate). */
3161 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3163 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3165 return AARCH64_RECORD_SUCCESS
;
3168 /* Record handler for advanced SIMD load and store instructions. */
3171 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3174 uint64_t addr_offset
= 0;
3175 uint32_t record_buf
[24];
3176 uint64_t record_buf_mem
[24];
3177 uint32_t reg_rn
, reg_rt
;
3178 uint32_t reg_index
= 0, mem_index
= 0;
3179 uint8_t opcode_bits
, size_bits
;
3181 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3182 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3183 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3184 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3185 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3189 fprintf_unfiltered (gdb_stdlog
,
3190 "Process record: Advanced SIMD load/store\n");
3193 /* Load/store single structure. */
3194 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3196 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3197 scale
= opcode_bits
>> 2;
3198 selem
= ((opcode_bits
& 0x02) |
3199 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3203 if (size_bits
& 0x01)
3204 return AARCH64_RECORD_UNKNOWN
;
3207 if ((size_bits
>> 1) & 0x01)
3208 return AARCH64_RECORD_UNKNOWN
;
3209 if (size_bits
& 0x01)
3211 if (!((opcode_bits
>> 1) & 0x01))
3214 return AARCH64_RECORD_UNKNOWN
;
3218 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3225 return AARCH64_RECORD_UNKNOWN
;
3231 for (sindex
= 0; sindex
< selem
; sindex
++)
3233 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3234 reg_rt
= (reg_rt
+ 1) % 32;
3238 for (sindex
= 0; sindex
< selem
; sindex
++)
3239 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3240 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3243 record_buf_mem
[mem_index
++] = esize
/ 8;
3244 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3246 addr_offset
= addr_offset
+ (esize
/ 8);
3247 reg_rt
= (reg_rt
+ 1) % 32;
3250 /* Load/store multiple structure. */
3253 uint8_t selem
, esize
, rpt
, elements
;
3254 uint8_t eindex
, rindex
;
3256 esize
= 8 << size_bits
;
3257 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3258 elements
= 128 / esize
;
3260 elements
= 64 / esize
;
3262 switch (opcode_bits
)
3264 /*LD/ST4 (4 Registers). */
3269 /*LD/ST1 (4 Registers). */
3274 /*LD/ST3 (3 Registers). */
3279 /*LD/ST1 (3 Registers). */
3284 /*LD/ST1 (1 Register). */
3289 /*LD/ST2 (2 Registers). */
3294 /*LD/ST1 (2 Registers). */
3300 return AARCH64_RECORD_UNSUPPORTED
;
3303 for (rindex
= 0; rindex
< rpt
; rindex
++)
3304 for (eindex
= 0; eindex
< elements
; eindex
++)
3306 uint8_t reg_tt
, sindex
;
3307 reg_tt
= (reg_rt
+ rindex
) % 32;
3308 for (sindex
= 0; sindex
< selem
; sindex
++)
3310 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3311 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3314 record_buf_mem
[mem_index
++] = esize
/ 8;
3315 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3317 addr_offset
= addr_offset
+ (esize
/ 8);
3318 reg_tt
= (reg_tt
+ 1) % 32;
3323 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3324 record_buf
[reg_index
++] = reg_rn
;
3326 aarch64_insn_r
->reg_rec_count
= reg_index
;
3327 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3328 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3330 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3332 return AARCH64_RECORD_SUCCESS
;
3335 /* Record handler for load and store instructions. */
3338 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3340 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3341 uint8_t insn_bit23
, insn_bit21
;
3342 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3343 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3344 uint64_t datasize
, offset
;
3345 uint32_t record_buf
[8];
3346 uint64_t record_buf_mem
[8];
3349 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3350 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3351 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3352 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3353 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3354 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3355 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3356 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3357 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3358 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3359 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3361 /* Load/store exclusive. */
3362 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3366 fprintf_unfiltered (gdb_stdlog
,
3367 "Process record: load/store exclusive\n");
3372 record_buf
[0] = reg_rt
;
3373 aarch64_insn_r
->reg_rec_count
= 1;
3376 record_buf
[1] = reg_rt2
;
3377 aarch64_insn_r
->reg_rec_count
= 2;
3383 datasize
= (8 << size_bits
) * 2;
3385 datasize
= (8 << size_bits
);
3386 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3388 record_buf_mem
[0] = datasize
/ 8;
3389 record_buf_mem
[1] = address
;
3390 aarch64_insn_r
->mem_rec_count
= 1;
3393 /* Save register rs. */
3394 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3395 aarch64_insn_r
->reg_rec_count
= 1;
3399 /* Load register (literal) instructions decoding. */
3400 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3404 fprintf_unfiltered (gdb_stdlog
,
3405 "Process record: load register (literal)\n");
3408 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3410 record_buf
[0] = reg_rt
;
3411 aarch64_insn_r
->reg_rec_count
= 1;
3413 /* All types of load/store pair instructions decoding. */
3414 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3418 fprintf_unfiltered (gdb_stdlog
,
3419 "Process record: load/store pair\n");
3426 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3427 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3431 record_buf
[0] = reg_rt
;
3432 record_buf
[1] = reg_rt2
;
3434 aarch64_insn_r
->reg_rec_count
= 2;
3439 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3441 size_bits
= size_bits
>> 1;
3442 datasize
= 8 << (2 + size_bits
);
3443 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3444 offset
= offset
<< (2 + size_bits
);
3445 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3447 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3449 if (imm7_off
& 0x40)
3450 address
= address
- offset
;
3452 address
= address
+ offset
;
3455 record_buf_mem
[0] = datasize
/ 8;
3456 record_buf_mem
[1] = address
;
3457 record_buf_mem
[2] = datasize
/ 8;
3458 record_buf_mem
[3] = address
+ (datasize
/ 8);
3459 aarch64_insn_r
->mem_rec_count
= 2;
3461 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3462 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3464 /* Load/store register (unsigned immediate) instructions. */
3465 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3467 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3474 if (size_bits
!= 0x03)
3477 return AARCH64_RECORD_UNKNOWN
;
3481 fprintf_unfiltered (gdb_stdlog
,
3482 "Process record: load/store (unsigned immediate):"
3483 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3489 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3490 datasize
= 8 << size_bits
;
3491 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3493 offset
= offset
<< size_bits
;
3494 address
= address
+ offset
;
3496 record_buf_mem
[0] = datasize
>> 3;
3497 record_buf_mem
[1] = address
;
3498 aarch64_insn_r
->mem_rec_count
= 1;
3503 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3505 record_buf
[0] = reg_rt
;
3506 aarch64_insn_r
->reg_rec_count
= 1;
3509 /* Load/store register (register offset) instructions. */
3510 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3511 && insn_bits10_11
== 0x02 && insn_bit21
)
3515 fprintf_unfiltered (gdb_stdlog
,
3516 "Process record: load/store (register offset)\n");
3518 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3525 if (size_bits
!= 0x03)
3528 return AARCH64_RECORD_UNKNOWN
;
3532 uint64_t reg_rm_val
;
3533 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3534 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3535 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3536 offset
= reg_rm_val
<< size_bits
;
3538 offset
= reg_rm_val
;
3539 datasize
= 8 << size_bits
;
3540 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3542 address
= address
+ offset
;
3543 record_buf_mem
[0] = datasize
>> 3;
3544 record_buf_mem
[1] = address
;
3545 aarch64_insn_r
->mem_rec_count
= 1;
3550 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3552 record_buf
[0] = reg_rt
;
3553 aarch64_insn_r
->reg_rec_count
= 1;
3556 /* Load/store register (immediate and unprivileged) instructions. */
3557 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3562 fprintf_unfiltered (gdb_stdlog
,
3563 "Process record: load/store (immediate and unprivileged)\n");
3565 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3572 if (size_bits
!= 0x03)
3575 return AARCH64_RECORD_UNKNOWN
;
3580 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3581 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3582 datasize
= 8 << size_bits
;
3583 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3585 if (insn_bits10_11
!= 0x01)
3587 if (imm9_off
& 0x0100)
3588 address
= address
- offset
;
3590 address
= address
+ offset
;
3592 record_buf_mem
[0] = datasize
>> 3;
3593 record_buf_mem
[1] = address
;
3594 aarch64_insn_r
->mem_rec_count
= 1;
3599 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3601 record_buf
[0] = reg_rt
;
3602 aarch64_insn_r
->reg_rec_count
= 1;
3604 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3605 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3607 /* Advanced SIMD load/store instructions. */
3609 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3611 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3613 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3615 return AARCH64_RECORD_SUCCESS
;
3618 /* Record handler for data processing SIMD and floating point instructions. */
3621 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3623 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3624 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3625 uint8_t insn_bits11_14
;
3626 uint32_t record_buf
[2];
3628 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3629 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3630 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3631 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3632 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3633 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3634 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3635 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3636 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3640 fprintf_unfiltered (gdb_stdlog
,
3641 "Process record: data processing SIMD/FP: ");
3644 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3646 /* Floating point - fixed point conversion instructions. */
3650 fprintf_unfiltered (gdb_stdlog
, "FP - fixed point conversion");
3652 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3653 record_buf
[0] = reg_rd
;
3655 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3657 /* Floating point - conditional compare instructions. */
3658 else if (insn_bits10_11
== 0x01)
3661 fprintf_unfiltered (gdb_stdlog
, "FP - conditional compare");
3663 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3665 /* Floating point - data processing (2-source) and
3666 conditional select instructions. */
3667 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3670 fprintf_unfiltered (gdb_stdlog
, "FP - DP (2-source)");
3672 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3674 else if (insn_bits10_11
== 0x00)
3676 /* Floating point - immediate instructions. */
3677 if ((insn_bits12_15
& 0x01) == 0x01
3678 || (insn_bits12_15
& 0x07) == 0x04)
3681 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3682 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3684 /* Floating point - compare instructions. */
3685 else if ((insn_bits12_15
& 0x03) == 0x02)
3688 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3689 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3691 /* Floating point - integer conversions instructions. */
3692 else if (insn_bits12_15
== 0x00)
3694 /* Convert float to integer instruction. */
3695 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3698 fprintf_unfiltered (gdb_stdlog
, "float to int conversion");
3700 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3702 /* Convert integer to float instruction. */
3703 else if ((opcode
>> 1) == 0x01 && !rmode
)
3706 fprintf_unfiltered (gdb_stdlog
, "int to float conversion");
3708 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3710 /* Move float to integer instruction. */
3711 else if ((opcode
>> 1) == 0x03)
3714 fprintf_unfiltered (gdb_stdlog
, "move float to int");
3716 if (!(opcode
& 0x01))
3717 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3719 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3722 return AARCH64_RECORD_UNKNOWN
;
3725 return AARCH64_RECORD_UNKNOWN
;
3728 return AARCH64_RECORD_UNKNOWN
;
3730 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3733 fprintf_unfiltered (gdb_stdlog
, "SIMD copy");
3735 /* Advanced SIMD copy instructions. */
3736 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3737 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3738 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3740 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3741 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3743 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3746 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3748 /* All remaining floating point or advanced SIMD instructions. */
3752 fprintf_unfiltered (gdb_stdlog
, "all remain");
3754 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3758 fprintf_unfiltered (gdb_stdlog
, "\n");
3760 aarch64_insn_r
->reg_rec_count
++;
3761 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3762 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3764 return AARCH64_RECORD_SUCCESS
;
3767 /* Decodes insns type and invokes its record handler. */
3770 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3772 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3774 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3775 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3776 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3777 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3779 /* Data processing - immediate instructions. */
3780 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3781 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3783 /* Branch, exception generation and system instructions. */
3784 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3785 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3787 /* Load and store instructions. */
3788 if (!ins_bit25
&& ins_bit27
)
3789 return aarch64_record_load_store (aarch64_insn_r
);
3791 /* Data processing - register instructions. */
3792 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3793 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3795 /* Data processing - SIMD and floating point instructions. */
3796 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3797 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3799 return AARCH64_RECORD_UNSUPPORTED
;
3802 /* Cleans up local record registers and memory allocations. */
3805 deallocate_reg_mem (insn_decode_record
*record
)
3807 xfree (record
->aarch64_regs
);
3808 xfree (record
->aarch64_mems
);
3811 /* Parse the current instruction and record the values of the registers and
3812 memory that will be changed in current instruction to record_arch_list
3813 return -1 if something is wrong. */
3816 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3817 CORE_ADDR insn_addr
)
3819 uint32_t rec_no
= 0;
3820 uint8_t insn_size
= 4;
3822 ULONGEST t_bit
= 0, insn_id
= 0;
3823 gdb_byte buf
[insn_size
];
3824 insn_decode_record aarch64_record
;
3826 memset (&buf
[0], 0, insn_size
);
3827 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3828 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3829 aarch64_record
.aarch64_insn
3830 = (uint32_t) extract_unsigned_integer (&buf
[0],
3832 gdbarch_byte_order (gdbarch
));
3833 aarch64_record
.regcache
= regcache
;
3834 aarch64_record
.this_addr
= insn_addr
;
3835 aarch64_record
.gdbarch
= gdbarch
;
3837 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3838 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3840 printf_unfiltered (_("Process record does not support instruction "
3841 "0x%0x at address %s.\n"),
3842 aarch64_record
.aarch64_insn
,
3843 paddress (gdbarch
, insn_addr
));
3849 /* Record registers. */
3850 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3852 /* Always record register CPSR. */
3853 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3854 AARCH64_CPSR_REGNUM
);
3855 if (aarch64_record
.aarch64_regs
)
3856 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3857 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3858 aarch64_record
.aarch64_regs
[rec_no
]))
3861 /* Record memories. */
3862 if (aarch64_record
.aarch64_mems
)
3863 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3864 if (record_full_arch_list_add_mem
3865 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3866 aarch64_record
.aarch64_mems
[rec_no
].len
))
3869 if (record_full_arch_list_add_end ())
3873 deallocate_reg_mem (&aarch64_record
);