1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bit-field extraction helpers for 32-bit instruction words.

   submask(x)      -- mask with bits [0, x] set.
   bit(obj, st)    -- bit ST of OBJ.
   bits(obj,st,fn) -- bits [ST, FN] of OBJ, right-justified.

   Use 1ULL (not 1L): on LLP64 hosts `long` is 32 bits, so a shift by
   31 or more would be undefined behavior.  Valid for x in [0, 62].  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Each bank of 32 pseudo registers
   exposes a different view (Q/D/S/H/B width) of the V registers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
76 /* The standard register names, and all the valid aliases for them. */
79 const char *const name
;
81 } aarch64_register_aliases
[] =
83 /* 64-bit register names. */
84 {"fp", AARCH64_FP_REGNUM
},
85 {"lr", AARCH64_LR_REGNUM
},
86 {"sp", AARCH64_SP_REGNUM
},
88 /* 32-bit register names. */
89 {"w0", AARCH64_X0_REGNUM
+ 0},
90 {"w1", AARCH64_X0_REGNUM
+ 1},
91 {"w2", AARCH64_X0_REGNUM
+ 2},
92 {"w3", AARCH64_X0_REGNUM
+ 3},
93 {"w4", AARCH64_X0_REGNUM
+ 4},
94 {"w5", AARCH64_X0_REGNUM
+ 5},
95 {"w6", AARCH64_X0_REGNUM
+ 6},
96 {"w7", AARCH64_X0_REGNUM
+ 7},
97 {"w8", AARCH64_X0_REGNUM
+ 8},
98 {"w9", AARCH64_X0_REGNUM
+ 9},
99 {"w10", AARCH64_X0_REGNUM
+ 10},
100 {"w11", AARCH64_X0_REGNUM
+ 11},
101 {"w12", AARCH64_X0_REGNUM
+ 12},
102 {"w13", AARCH64_X0_REGNUM
+ 13},
103 {"w14", AARCH64_X0_REGNUM
+ 14},
104 {"w15", AARCH64_X0_REGNUM
+ 15},
105 {"w16", AARCH64_X0_REGNUM
+ 16},
106 {"w17", AARCH64_X0_REGNUM
+ 17},
107 {"w18", AARCH64_X0_REGNUM
+ 18},
108 {"w19", AARCH64_X0_REGNUM
+ 19},
109 {"w20", AARCH64_X0_REGNUM
+ 20},
110 {"w21", AARCH64_X0_REGNUM
+ 21},
111 {"w22", AARCH64_X0_REGNUM
+ 22},
112 {"w23", AARCH64_X0_REGNUM
+ 23},
113 {"w24", AARCH64_X0_REGNUM
+ 24},
114 {"w25", AARCH64_X0_REGNUM
+ 25},
115 {"w26", AARCH64_X0_REGNUM
+ 26},
116 {"w27", AARCH64_X0_REGNUM
+ 27},
117 {"w28", AARCH64_X0_REGNUM
+ 28},
118 {"w29", AARCH64_X0_REGNUM
+ 29},
119 {"w30", AARCH64_X0_REGNUM
+ 30},
122 {"ip0", AARCH64_X0_REGNUM
+ 16},
123 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
159 /* AArch64 prologue cache structure. */
160 struct aarch64_prologue_cache
162 /* The program counter at the start of the function. It is used to
163 identify this frame as a prologue frame. */
166 /* The program counter at the time this frame was created; i.e. where
167 this function was called from. It is used to identify this frame as a
171 /* The stack pointer at the time this frame was created; i.e. the
172 caller's stack pointer when this function was called. It is used
173 to identify this frame. */
176 /* Is the target available to read from? */
179 /* The frame base for this frame is just prev_sp - frame size.
180 FRAMESIZE is the distance from the frame pointer to the
181 initial stack pointer. */
184 /* The register used to hold the frame pointer for this frame. */
187 /* Saved register offsets. */
188 struct trad_frame_saved_reg
*saved_regs
;
/* Print the current setting of the "debug aarch64" flag.  Callback
   for the corresponding "show" command.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
198 /* Analyze a prologue, looking for a recognizable stack frame
199 and frame pointer. Scan until we encounter a store that could
200 clobber the stack frame unexpectedly, or an unknown instruction. */
203 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
204 CORE_ADDR start
, CORE_ADDR limit
,
205 struct aarch64_prologue_cache
*cache
)
207 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
209 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
210 struct pv_area
*stack
;
211 struct cleanup
*back_to
;
213 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
214 regs
[i
] = pv_register (i
, 0);
215 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
216 back_to
= make_cleanup_free_pv_area (stack
);
218 for (; start
< limit
; start
+= 4)
223 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
225 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
228 if (inst
.opcode
->iclass
== addsub_imm
229 && (inst
.opcode
->op
== OP_ADD
230 || strcmp ("sub", inst
.opcode
->name
) == 0))
232 unsigned rd
= inst
.operands
[0].reg
.regno
;
233 unsigned rn
= inst
.operands
[1].reg
.regno
;
235 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
236 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
237 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
238 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
240 if (inst
.opcode
->op
== OP_ADD
)
242 regs
[rd
] = pv_add_constant (regs
[rn
],
243 inst
.operands
[2].imm
.value
);
247 regs
[rd
] = pv_add_constant (regs
[rn
],
248 -inst
.operands
[2].imm
.value
);
251 else if (inst
.opcode
->iclass
== pcreladdr
252 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
254 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
255 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
257 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
259 else if (inst
.opcode
->iclass
== branch_imm
)
261 /* Stop analysis on branch. */
264 else if (inst
.opcode
->iclass
== condbranch
)
266 /* Stop analysis on branch. */
269 else if (inst
.opcode
->iclass
== branch_reg
)
271 /* Stop analysis on branch. */
274 else if (inst
.opcode
->iclass
== compbranch
)
276 /* Stop analysis on branch. */
279 else if (inst
.opcode
->op
== OP_MOVZ
)
281 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
282 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
284 else if (inst
.opcode
->iclass
== log_shift
285 && strcmp (inst
.opcode
->name
, "orr") == 0)
287 unsigned rd
= inst
.operands
[0].reg
.regno
;
288 unsigned rn
= inst
.operands
[1].reg
.regno
;
289 unsigned rm
= inst
.operands
[2].reg
.regno
;
291 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
292 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
293 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
295 if (inst
.operands
[2].shifter
.amount
== 0
296 && rn
== AARCH64_SP_REGNUM
)
302 debug_printf ("aarch64: prologue analysis gave up "
303 "addr=%s opcode=0x%x (orr x register)\n",
304 core_addr_to_string_nz (start
), insn
);
309 else if (inst
.opcode
->op
== OP_STUR
)
311 unsigned rt
= inst
.operands
[0].reg
.regno
;
312 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
314 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
316 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
317 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
318 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
319 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
321 pv_area_store (stack
, pv_add_constant (regs
[rn
],
322 inst
.operands
[1].addr
.offset
.imm
),
323 is64
? 8 : 4, regs
[rt
]);
325 else if ((inst
.opcode
->iclass
== ldstpair_off
326 || (inst
.opcode
->iclass
== ldstpair_indexed
327 && inst
.operands
[2].addr
.preind
))
328 && strcmp ("stp", inst
.opcode
->name
) == 0)
330 /* STP with addressing mode Pre-indexed and Base register. */
331 unsigned rt1
= inst
.operands
[0].reg
.regno
;
332 unsigned rt2
= inst
.operands
[1].reg
.regno
;
333 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
334 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
336 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
337 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
);
338 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
339 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
341 /* If recording this store would invalidate the store area
342 (perhaps because rn is not known) then we should abandon
343 further prologue analysis. */
344 if (pv_area_store_would_trash (stack
,
345 pv_add_constant (regs
[rn
], imm
)))
348 if (pv_area_store_would_trash (stack
,
349 pv_add_constant (regs
[rn
], imm
+ 8)))
352 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
354 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
357 if (inst
.operands
[2].addr
.writeback
)
358 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
361 else if (inst
.opcode
->iclass
== testbranch
)
363 /* Stop analysis on branch. */
370 debug_printf ("aarch64: prologue analysis gave up addr=%s"
372 core_addr_to_string_nz (start
), insn
);
380 do_cleanups (back_to
);
384 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
386 /* Frame pointer is fp. Frame size is constant. */
387 cache
->framereg
= AARCH64_FP_REGNUM
;
388 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
390 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
392 /* Try the stack pointer. */
393 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
394 cache
->framereg
= AARCH64_SP_REGNUM
;
398 /* We're just out of luck. We don't know where the frame is. */
399 cache
->framereg
= -1;
400 cache
->framesize
= 0;
403 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
407 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
408 cache
->saved_regs
[i
].addr
= offset
;
411 do_cleanups (back_to
);
415 /* Implement the "skip_prologue" gdbarch method. */
418 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
420 CORE_ADDR func_addr
, limit_pc
;
422 /* See if we can determine the end of the prologue via the symbol
423 table. If so, then return either PC, or the PC after the
424 prologue, whichever is greater. */
425 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
427 CORE_ADDR post_prologue_pc
428 = skip_prologue_using_sal (gdbarch
, func_addr
);
430 if (post_prologue_pc
!= 0)
431 return std::max (pc
, post_prologue_pc
);
434 /* Can't determine prologue from the symbol table, need to examine
437 /* Find an upper limit on the function prologue using the debug
438 information. If the debug information could not be used to
439 provide that bound, then use an arbitrary large number as the
441 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
443 limit_pc
= pc
+ 128; /* Magic. */
445 /* Try disassembling prologue. */
446 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
449 /* Scan the function prologue for THIS_FRAME and populate the prologue
453 aarch64_scan_prologue (struct frame_info
*this_frame
,
454 struct aarch64_prologue_cache
*cache
)
456 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
457 CORE_ADDR prologue_start
;
458 CORE_ADDR prologue_end
;
459 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
460 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
462 cache
->prev_pc
= prev_pc
;
464 /* Assume we do not find a frame. */
465 cache
->framereg
= -1;
466 cache
->framesize
= 0;
468 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
471 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
475 /* No line info so use the current PC. */
476 prologue_end
= prev_pc
;
478 else if (sal
.end
< prologue_end
)
480 /* The next line begins after the function end. */
481 prologue_end
= sal
.end
;
484 prologue_end
= std::min (prologue_end
, prev_pc
);
485 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
491 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
495 cache
->framereg
= AARCH64_FP_REGNUM
;
496 cache
->framesize
= 16;
497 cache
->saved_regs
[29].addr
= 0;
498 cache
->saved_regs
[30].addr
= 8;
502 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
503 function may throw an exception if the inferior's registers or memory is
507 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
508 struct aarch64_prologue_cache
*cache
)
510 CORE_ADDR unwound_fp
;
513 aarch64_scan_prologue (this_frame
, cache
);
515 if (cache
->framereg
== -1)
518 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
522 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
524 /* Calculate actual addresses of saved registers using offsets
525 determined by aarch64_analyze_prologue. */
526 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
527 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
528 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
530 cache
->func
= get_frame_func (this_frame
);
532 cache
->available_p
= 1;
535 /* Allocate and fill in *THIS_CACHE with information about the prologue of
536 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
537 Return a pointer to the current aarch64_prologue_cache in
540 static struct aarch64_prologue_cache
*
541 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
543 struct aarch64_prologue_cache
*cache
;
545 if (*this_cache
!= NULL
)
546 return (struct aarch64_prologue_cache
*) *this_cache
;
548 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
549 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
554 aarch64_make_prologue_cache_1 (this_frame
, cache
);
556 CATCH (ex
, RETURN_MASK_ERROR
)
558 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
559 throw_exception (ex
);
566 /* Implement the "stop_reason" frame_unwind method. */
568 static enum unwind_stop_reason
569 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
572 struct aarch64_prologue_cache
*cache
573 = aarch64_make_prologue_cache (this_frame
, this_cache
);
575 if (!cache
->available_p
)
576 return UNWIND_UNAVAILABLE
;
578 /* Halt the backtrace at "_start". */
579 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
580 return UNWIND_OUTERMOST
;
582 /* We've hit a wall, stop. */
583 if (cache
->prev_sp
== 0)
584 return UNWIND_OUTERMOST
;
586 return UNWIND_NO_REASON
;
589 /* Our frame ID for a normal frame is the current function's starting
590 PC and the caller's SP when we were called. */
593 aarch64_prologue_this_id (struct frame_info
*this_frame
,
594 void **this_cache
, struct frame_id
*this_id
)
596 struct aarch64_prologue_cache
*cache
597 = aarch64_make_prologue_cache (this_frame
, this_cache
);
599 if (!cache
->available_p
)
600 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
602 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
605 /* Implement the "prev_register" frame_unwind method. */
607 static struct value
*
608 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
609 void **this_cache
, int prev_regnum
)
611 struct aarch64_prologue_cache
*cache
612 = aarch64_make_prologue_cache (this_frame
, this_cache
);
614 /* If we are asked to unwind the PC, then we need to return the LR
615 instead. The prologue may save PC, but it will point into this
616 frame's prologue, not the next frame's resume location. */
617 if (prev_regnum
== AARCH64_PC_REGNUM
)
621 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
622 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
625 /* SP is generally not saved to the stack, but this frame is
626 identified by the next frame's stack pointer at the time of the
627 call. The value was already reconstructed into PREV_SP. */
640 if (prev_regnum
== AARCH64_SP_REGNUM
)
641 return frame_unwind_got_constant (this_frame
, prev_regnum
,
644 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
648 /* AArch64 prologue unwinder. */
649 struct frame_unwind aarch64_prologue_unwind
=
652 aarch64_prologue_frame_unwind_stop_reason
,
653 aarch64_prologue_this_id
,
654 aarch64_prologue_prev_register
,
656 default_frame_sniffer
659 /* Allocate and fill in *THIS_CACHE with information about the prologue of
660 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
661 Return a pointer to the current aarch64_prologue_cache in
664 static struct aarch64_prologue_cache
*
665 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
667 struct aarch64_prologue_cache
*cache
;
669 if (*this_cache
!= NULL
)
670 return (struct aarch64_prologue_cache
*) *this_cache
;
672 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
673 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
678 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
680 cache
->prev_pc
= get_frame_pc (this_frame
);
681 cache
->available_p
= 1;
683 CATCH (ex
, RETURN_MASK_ERROR
)
685 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
686 throw_exception (ex
);
693 /* Implement the "stop_reason" frame_unwind method. */
695 static enum unwind_stop_reason
696 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
699 struct aarch64_prologue_cache
*cache
700 = aarch64_make_stub_cache (this_frame
, this_cache
);
702 if (!cache
->available_p
)
703 return UNWIND_UNAVAILABLE
;
705 return UNWIND_NO_REASON
;
708 /* Our frame ID for a stub frame is the current SP and LR. */
711 aarch64_stub_this_id (struct frame_info
*this_frame
,
712 void **this_cache
, struct frame_id
*this_id
)
714 struct aarch64_prologue_cache
*cache
715 = aarch64_make_stub_cache (this_frame
, this_cache
);
717 if (cache
->available_p
)
718 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
720 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
723 /* Implement the "sniffer" frame_unwind method. */
726 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
727 struct frame_info
*this_frame
,
728 void **this_prologue_cache
)
730 CORE_ADDR addr_in_block
;
733 addr_in_block
= get_frame_address_in_block (this_frame
);
734 if (in_plt_section (addr_in_block
)
735 /* We also use the stub winder if the target memory is unreadable
736 to avoid having the prologue unwinder trying to read it. */
737 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
743 /* AArch64 stub unwinder. */
744 struct frame_unwind aarch64_stub_unwind
=
747 aarch64_stub_frame_unwind_stop_reason
,
748 aarch64_stub_this_id
,
749 aarch64_prologue_prev_register
,
751 aarch64_stub_unwind_sniffer
754 /* Return the frame base address of *THIS_FRAME. */
757 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
759 struct aarch64_prologue_cache
*cache
760 = aarch64_make_prologue_cache (this_frame
, this_cache
);
762 return cache
->prev_sp
- cache
->framesize
;
765 /* AArch64 default frame base information. */
766 struct frame_base aarch64_normal_base
=
768 &aarch64_prologue_unwind
,
769 aarch64_normal_frame_base
,
770 aarch64_normal_frame_base
,
771 aarch64_normal_frame_base
774 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
775 dummy frame. The frame ID's base needs to match the TOS value
776 saved by save_dummy_frame_tos () and returned from
777 aarch64_push_dummy_call, and the PC needs to match the dummy
778 frame's breakpoint. */
780 static struct frame_id
781 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
783 return frame_id_build (get_frame_register_unsigned (this_frame
,
785 get_frame_pc (this_frame
));
788 /* Implement the "unwind_pc" gdbarch method. */
791 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
794 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
799 /* Implement the "unwind_sp" gdbarch method. */
802 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
804 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
807 /* Return the value of the REGNUM register in the previous frame of
810 static struct value
*
811 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
812 void **this_cache
, int regnum
)
818 case AARCH64_PC_REGNUM
:
819 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
820 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
823 internal_error (__FILE__
, __LINE__
,
824 _("Unexpected register %d"), regnum
);
828 /* Implement the "init_reg" dwarf2_frame_ops method. */
831 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
832 struct dwarf2_frame_state_reg
*reg
,
833 struct frame_info
*this_frame
)
837 case AARCH64_PC_REGNUM
:
838 reg
->how
= DWARF2_FRAME_REG_FN
;
839 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
841 case AARCH64_SP_REGNUM
:
842 reg
->how
= DWARF2_FRAME_REG_CFA
;
847 /* When arguments must be pushed onto the stack, they go on in reverse
848 order. The code below implements a FILO (stack) to do this. */
852 /* Value to pass on stack. It can be NULL if this item is for stack
854 const gdb_byte
*data
;
856 /* Size in bytes of value to pass on stack. */
860 DEF_VEC_O (stack_item_t
);
862 /* Return the alignment (in bytes) of the given type. */
865 aarch64_type_align (struct type
*t
)
871 t
= check_typedef (t
);
872 switch (TYPE_CODE (t
))
875 /* Should never happen. */
876 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
884 case TYPE_CODE_RANGE
:
885 case TYPE_CODE_BITSTRING
:
889 return TYPE_LENGTH (t
);
891 case TYPE_CODE_ARRAY
:
894 /* Use the natural alignment for vector types (the same for
895 scalar type), but the maximum alignment is 128-bit. */
896 if (TYPE_LENGTH (t
) > 16)
899 return TYPE_LENGTH (t
);
902 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
903 case TYPE_CODE_COMPLEX
:
904 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
906 case TYPE_CODE_STRUCT
:
907 case TYPE_CODE_UNION
:
909 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
911 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
919 /* Return 1 if *TY is a homogeneous floating-point aggregate or
920 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
921 document; otherwise return 0. */
924 is_hfa_or_hva (struct type
*ty
)
926 switch (TYPE_CODE (ty
))
928 case TYPE_CODE_ARRAY
:
930 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
932 if (TYPE_VECTOR (ty
))
935 if (TYPE_LENGTH (ty
) <= 4 /* HFA or HVA has at most 4 members. */
936 && (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
/* HFA */
937 || (TYPE_CODE (target_ty
) == TYPE_CODE_ARRAY
/* HVA */
938 && TYPE_VECTOR (target_ty
))))
943 case TYPE_CODE_UNION
:
944 case TYPE_CODE_STRUCT
:
946 /* HFA or HVA has at most four members. */
947 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
949 struct type
*member0_type
;
951 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
952 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
953 || (TYPE_CODE (member0_type
) == TYPE_CODE_ARRAY
954 && TYPE_VECTOR (member0_type
)))
958 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
960 struct type
*member1_type
;
962 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
963 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
964 || (TYPE_LENGTH (member0_type
)
965 != TYPE_LENGTH (member1_type
)))
981 /* AArch64 function call information structure. */
982 struct aarch64_call_info
984 /* the current argument number. */
987 /* The next general purpose register number, equivalent to NGRN as
988 described in the AArch64 Procedure Call Standard. */
991 /* The next SIMD and floating point register number, equivalent to
992 NSRN as described in the AArch64 Procedure Call Standard. */
995 /* The next stacked argument address, equivalent to NSAA as
996 described in the AArch64 Procedure Call Standard. */
999 /* Stack item vector. */
1000 VEC(stack_item_t
) *si
;
1003 /* Pass a value in a sequence of consecutive X registers. The caller
1004 is responsbile for ensuring sufficient registers are available. */
1007 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1008 struct aarch64_call_info
*info
, struct type
*type
,
1011 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1012 int len
= TYPE_LENGTH (type
);
1013 enum type_code typecode
= TYPE_CODE (type
);
1014 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1015 const bfd_byte
*buf
= value_contents (arg
);
1021 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1022 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1026 /* Adjust sub-word struct/union args when big-endian. */
1027 if (byte_order
== BFD_ENDIAN_BIG
1028 && partial_len
< X_REGISTER_SIZE
1029 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1030 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1034 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1035 gdbarch_register_name (gdbarch
, regnum
),
1036 phex (regval
, X_REGISTER_SIZE
));
1038 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1045 /* Attempt to marshall a value in a V register. Return 1 if
1046 successful, or 0 if insufficient registers are available. This
1047 function, unlike the equivalent pass_in_x() function does not
1048 handle arguments spread across multiple registers. */
1051 pass_in_v (struct gdbarch
*gdbarch
,
1052 struct regcache
*regcache
,
1053 struct aarch64_call_info
*info
,
1054 int len
, const bfd_byte
*buf
)
1058 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1059 gdb_byte reg
[V_REGISTER_SIZE
];
1064 memset (reg
, 0, sizeof (reg
));
1065 /* PCS C.1, the argument is allocated to the least significant
1066 bits of V register. */
1067 memcpy (reg
, buf
, len
);
1068 regcache_cooked_write (regcache
, regnum
, reg
);
1072 debug_printf ("arg %d in %s\n", info
->argnum
,
1073 gdbarch_register_name (gdbarch
, regnum
));
1081 /* Marshall an argument onto the stack. */
1084 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1087 const bfd_byte
*buf
= value_contents (arg
);
1088 int len
= TYPE_LENGTH (type
);
1094 align
= aarch64_type_align (type
);
1096 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1097 Natural alignment of the argument's type. */
1098 align
= align_up (align
, 8);
1100 /* The AArch64 PCS requires at most doubleword alignment. */
1106 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1112 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1115 if (info
->nsaa
& (align
- 1))
1117 /* Push stack alignment padding. */
1118 int pad
= align
- (info
->nsaa
& (align
- 1));
1123 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1128 /* Marshall an argument into a sequence of one or more consecutive X
1129 registers or, if insufficient X registers are available then onto
1133 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1134 struct aarch64_call_info
*info
, struct type
*type
,
1137 int len
= TYPE_LENGTH (type
);
1138 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1140 /* PCS C.13 - Pass in registers if we have enough spare */
1141 if (info
->ngrn
+ nregs
<= 8)
1143 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1144 info
->ngrn
+= nregs
;
1149 pass_on_stack (info
, type
, arg
);
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
		  value_contents (arg)))
    pass_on_stack (info, type, arg);
}
1168 /* Implement the "push_dummy_call" gdbarch method. */
1171 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1172 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1174 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1175 CORE_ADDR struct_addr
)
1178 struct aarch64_call_info info
;
1179 struct type
*func_type
;
1180 struct type
*return_type
;
1181 int lang_struct_return
;
1183 memset (&info
, 0, sizeof (info
));
1185 /* We need to know what the type of the called function is in order
1186 to determine the number of named/anonymous arguments for the
1187 actual argument placement, and the return type in order to handle
1188 return value correctly.
1190 The generic code above us views the decision of return in memory
1191 or return in registers as a two stage processes. The language
1192 handler is consulted first and may decide to return in memory (eg
1193 class with copy constructor returned by value), this will cause
1194 the generic code to allocate space AND insert an initial leading
1197 If the language code does not decide to pass in memory then the
1198 target code is consulted.
1200 If the language code decides to pass in memory we want to move
1201 the pointer inserted as the initial argument from the argument
1202 list and into X8, the conventional AArch64 struct return pointer
1205 This is slightly awkward, ideally the flag "lang_struct_return"
1206 would be passed to the targets implementation of push_dummy_call.
1207 Rather that change the target interface we call the language code
1208 directly ourselves. */
1210 func_type
= check_typedef (value_type (function
));
1212 /* Dereference function pointer types. */
1213 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1214 func_type
= TYPE_TARGET_TYPE (func_type
);
1216 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1217 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1219 /* If language_pass_by_reference () returned true we will have been
1220 given an additional initial argument, a hidden pointer to the
1221 return slot in memory. */
1222 return_type
= TYPE_TARGET_TYPE (func_type
);
1223 lang_struct_return
= language_pass_by_reference (return_type
);
1225 /* Set the return address. For the AArch64, the return breakpoint
1226 is always at BP_ADDR. */
1227 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1229 /* If we were given an initial argument for the return slot because
1230 lang_struct_return was true, lose it. */
1231 if (lang_struct_return
)
1237 /* The struct_return pointer occupies X8. */
1238 if (struct_return
|| lang_struct_return
)
1242 debug_printf ("struct return in %s = 0x%s\n",
1243 gdbarch_register_name (gdbarch
,
1244 AARCH64_STRUCT_RETURN_REGNUM
),
1245 paddress (gdbarch
, struct_addr
));
1247 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1251 for (argnum
= 0; argnum
< nargs
; argnum
++)
1253 struct value
*arg
= args
[argnum
];
1254 struct type
*arg_type
;
1257 arg_type
= check_typedef (value_type (arg
));
1258 len
= TYPE_LENGTH (arg_type
);
1260 switch (TYPE_CODE (arg_type
))
1263 case TYPE_CODE_BOOL
:
1264 case TYPE_CODE_CHAR
:
1265 case TYPE_CODE_RANGE
:
1266 case TYPE_CODE_ENUM
:
1269 /* Promote to 32 bit integer. */
1270 if (TYPE_UNSIGNED (arg_type
))
1271 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1273 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1274 arg
= value_cast (arg_type
, arg
);
1276 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1279 case TYPE_CODE_COMPLEX
:
1282 const bfd_byte
*buf
= value_contents (arg
);
1283 struct type
*target_type
=
1284 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1286 pass_in_v (gdbarch
, regcache
, &info
,
1287 TYPE_LENGTH (target_type
), buf
);
1288 pass_in_v (gdbarch
, regcache
, &info
,
1289 TYPE_LENGTH (target_type
),
1290 buf
+ TYPE_LENGTH (target_type
));
1295 pass_on_stack (&info
, arg_type
, arg
);
1299 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1302 case TYPE_CODE_STRUCT
:
1303 case TYPE_CODE_ARRAY
:
1304 case TYPE_CODE_UNION
:
1305 if (is_hfa_or_hva (arg_type
))
1307 int elements
= TYPE_NFIELDS (arg_type
);
1309 /* Homogeneous Aggregates */
1310 if (info
.nsrn
+ elements
< 8)
1314 for (i
= 0; i
< elements
; i
++)
1316 /* We know that we have sufficient registers
1317 available therefore this will never fallback
1319 struct value
*field
=
1320 value_primitive_field (arg
, 0, i
, arg_type
);
1321 struct type
*field_type
=
1322 check_typedef (value_type (field
));
1324 pass_in_v_or_stack (gdbarch
, regcache
, &info
,
1331 pass_on_stack (&info
, arg_type
, arg
);
1334 else if (TYPE_CODE (arg_type
) == TYPE_CODE_ARRAY
1335 && TYPE_VECTOR (arg_type
) && (len
== 16 || len
== 8))
1337 /* Short vector types are passed in V registers. */
1338 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1342 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1343 invisible reference. */
1345 /* Allocate aligned storage. */
1346 sp
= align_down (sp
- len
, 16);
1348 /* Write the real data into the stack. */
1349 write_memory (sp
, value_contents (arg
), len
);
1351 /* Construct the indirection. */
1352 arg_type
= lookup_pointer_type (arg_type
);
1353 arg
= value_from_pointer (arg_type
, sp
);
1354 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1357 /* PCS C.15 / C.18 multiple values pass. */
1358 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1362 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1367 /* Make sure stack retains 16 byte alignment. */
1369 sp
-= 16 - (info
.nsaa
& 15);
1371 while (!VEC_empty (stack_item_t
, info
.si
))
1373 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1376 if (si
->data
!= NULL
)
1377 write_memory (sp
, si
->data
, si
->len
);
1378 VEC_pop (stack_item_t
, info
.si
);
1381 VEC_free (stack_item_t
, info
.si
);
1383 /* Finally, update the SP register. */
1384 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1389 /* Implement the "frame_align" gdbarch method. */
1392 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1394 /* Align the stack to sixteen bytes. */
1395 return sp
& ~(CORE_ADDR
) 15;
1398 /* Return the type for an AdvSISD Q register. */
1400 static struct type
*
1401 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1403 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1405 if (tdep
->vnq_type
== NULL
)
1410 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1413 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1414 append_composite_type_field (t
, "u", elem
);
1416 elem
= builtin_type (gdbarch
)->builtin_int128
;
1417 append_composite_type_field (t
, "s", elem
);
1422 return tdep
->vnq_type
;
1425 /* Return the type for an AdvSISD D register. */
1427 static struct type
*
1428 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1430 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1432 if (tdep
->vnd_type
== NULL
)
1437 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1440 elem
= builtin_type (gdbarch
)->builtin_double
;
1441 append_composite_type_field (t
, "f", elem
);
1443 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1444 append_composite_type_field (t
, "u", elem
);
1446 elem
= builtin_type (gdbarch
)->builtin_int64
;
1447 append_composite_type_field (t
, "s", elem
);
1452 return tdep
->vnd_type
;
1455 /* Return the type for an AdvSISD S register. */
1457 static struct type
*
1458 aarch64_vns_type (struct gdbarch
*gdbarch
)
1460 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1462 if (tdep
->vns_type
== NULL
)
1467 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1470 elem
= builtin_type (gdbarch
)->builtin_float
;
1471 append_composite_type_field (t
, "f", elem
);
1473 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1474 append_composite_type_field (t
, "u", elem
);
1476 elem
= builtin_type (gdbarch
)->builtin_int32
;
1477 append_composite_type_field (t
, "s", elem
);
1482 return tdep
->vns_type
;
1485 /* Return the type for an AdvSISD H register. */
1487 static struct type
*
1488 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1490 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1492 if (tdep
->vnh_type
== NULL
)
1497 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1500 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1501 append_composite_type_field (t
, "u", elem
);
1503 elem
= builtin_type (gdbarch
)->builtin_int16
;
1504 append_composite_type_field (t
, "s", elem
);
1509 return tdep
->vnh_type
;
1512 /* Return the type for an AdvSISD B register. */
1514 static struct type
*
1515 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1517 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1519 if (tdep
->vnb_type
== NULL
)
1524 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1527 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1528 append_composite_type_field (t
, "u", elem
);
1530 elem
= builtin_type (gdbarch
)->builtin_int8
;
1531 append_composite_type_field (t
, "s", elem
);
1536 return tdep
->vnb_type
;
1539 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1542 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1544 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1545 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1547 if (reg
== AARCH64_DWARF_SP
)
1548 return AARCH64_SP_REGNUM
;
1550 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1551 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1557 /* Implement the "print_insn" gdbarch method. */
1560 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1562 info
->symbols
= NULL
;
1563 return print_insn_aarch64 (memaddr
, info
);
1566 /* AArch64 BRK software debug mode instruction.
1567 Note that AArch64 code is always little-endian.
1568 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1569 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1571 /* Implement the "breakpoint_from_pc" gdbarch method. */
1573 static const gdb_byte
*
1574 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1577 *lenptr
= sizeof (aarch64_default_breakpoint
);
1578 return aarch64_default_breakpoint
;
1581 /* Extract from an array REGS containing the (raw) register state a
1582 function return value of type TYPE, and copy that, in virtual
1583 format, into VALBUF. */
1586 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1589 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1590 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1592 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1594 bfd_byte buf
[V_REGISTER_SIZE
];
1595 int len
= TYPE_LENGTH (type
);
1597 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1598 memcpy (valbuf
, buf
, len
);
1600 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1601 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1602 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1603 || TYPE_CODE (type
) == TYPE_CODE_PTR
1604 || TYPE_CODE (type
) == TYPE_CODE_REF
1605 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1607 /* If the the type is a plain integer, then the access is
1608 straight-forward. Otherwise we have to play around a bit
1610 int len
= TYPE_LENGTH (type
);
1611 int regno
= AARCH64_X0_REGNUM
;
1616 /* By using store_unsigned_integer we avoid having to do
1617 anything special for small big-endian values. */
1618 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1619 store_unsigned_integer (valbuf
,
1620 (len
> X_REGISTER_SIZE
1621 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1622 len
-= X_REGISTER_SIZE
;
1623 valbuf
+= X_REGISTER_SIZE
;
1626 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
1628 int regno
= AARCH64_V0_REGNUM
;
1629 bfd_byte buf
[V_REGISTER_SIZE
];
1630 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1631 int len
= TYPE_LENGTH (target_type
);
1633 regcache_cooked_read (regs
, regno
, buf
);
1634 memcpy (valbuf
, buf
, len
);
1636 regcache_cooked_read (regs
, regno
+ 1, buf
);
1637 memcpy (valbuf
, buf
, len
);
1640 else if (is_hfa_or_hva (type
))
1642 int elements
= TYPE_NFIELDS (type
);
1643 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1644 int len
= TYPE_LENGTH (member_type
);
1647 for (i
= 0; i
< elements
; i
++)
1649 int regno
= AARCH64_V0_REGNUM
+ i
;
1650 bfd_byte buf
[V_REGISTER_SIZE
];
1654 debug_printf ("read HFA or HVA return value element %d from %s\n",
1656 gdbarch_register_name (gdbarch
, regno
));
1658 regcache_cooked_read (regs
, regno
, buf
);
1660 memcpy (valbuf
, buf
, len
);
1664 else if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (type
)
1665 && (TYPE_LENGTH (type
) == 16 || TYPE_LENGTH (type
) == 8))
1667 /* Short vector is returned in V register. */
1668 gdb_byte buf
[V_REGISTER_SIZE
];
1670 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1671 memcpy (valbuf
, buf
, TYPE_LENGTH (type
));
1675 /* For a structure or union the behaviour is as if the value had
1676 been stored to word-aligned memory and then loaded into
1677 registers with 64-bit load instruction(s). */
1678 int len
= TYPE_LENGTH (type
);
1679 int regno
= AARCH64_X0_REGNUM
;
1680 bfd_byte buf
[X_REGISTER_SIZE
];
1684 regcache_cooked_read (regs
, regno
++, buf
);
1685 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1686 len
-= X_REGISTER_SIZE
;
1687 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  /* Small non-HFA aggregates fit in registers.  */
  return 0;
}
1720 /* Write into appropriate registers a function return value of type
1721 TYPE, given in virtual format. */
1724 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
1725 const gdb_byte
*valbuf
)
1727 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1728 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1730 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1732 bfd_byte buf
[V_REGISTER_SIZE
];
1733 int len
= TYPE_LENGTH (type
);
1735 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
1736 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
1738 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1739 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1740 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1741 || TYPE_CODE (type
) == TYPE_CODE_PTR
1742 || TYPE_CODE (type
) == TYPE_CODE_REF
1743 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1745 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
1747 /* Values of one word or less are zero/sign-extended and
1749 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
1750 LONGEST val
= unpack_long (type
, valbuf
);
1752 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
1753 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
1757 /* Integral values greater than one word are stored in
1758 consecutive registers starting with r0. This will always
1759 be a multiple of the regiser size. */
1760 int len
= TYPE_LENGTH (type
);
1761 int regno
= AARCH64_X0_REGNUM
;
1765 regcache_cooked_write (regs
, regno
++, valbuf
);
1766 len
-= X_REGISTER_SIZE
;
1767 valbuf
+= X_REGISTER_SIZE
;
1771 else if (is_hfa_or_hva (type
))
1773 int elements
= TYPE_NFIELDS (type
);
1774 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1775 int len
= TYPE_LENGTH (member_type
);
1778 for (i
= 0; i
< elements
; i
++)
1780 int regno
= AARCH64_V0_REGNUM
+ i
;
1781 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
1785 debug_printf ("write HFA or HVA return value element %d to %s\n",
1787 gdbarch_register_name (gdbarch
, regno
));
1790 memcpy (tmpbuf
, valbuf
, len
);
1791 regcache_cooked_write (regs
, regno
, tmpbuf
);
1795 else if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (type
)
1796 && (TYPE_LENGTH (type
) == 8 || TYPE_LENGTH (type
) == 16))
1799 gdb_byte buf
[V_REGISTER_SIZE
];
1801 memcpy (buf
, valbuf
, TYPE_LENGTH (type
));
1802 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
1806 /* For a structure or union the behaviour is as if the value had
1807 been stored to word-aligned memory and then loaded into
1808 registers with 64-bit load instruction(s). */
1809 int len
= TYPE_LENGTH (type
);
1810 int regno
= AARCH64_X0_REGNUM
;
1811 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
1815 memcpy (tmpbuf
, valbuf
,
1816 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1817 regcache_cooked_write (regs
, regno
++, tmpbuf
);
1818 len
-= X_REGISTER_SIZE
;
1819 valbuf
+= X_REGISTER_SIZE
;
1824 /* Implement the "return_value" gdbarch method. */
1826 static enum return_value_convention
1827 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
1828 struct type
*valtype
, struct regcache
*regcache
,
1829 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
1832 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
1833 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
1834 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
1836 if (aarch64_return_in_memory (gdbarch
, valtype
))
1839 debug_printf ("return value in memory\n");
1840 return RETURN_VALUE_STRUCT_CONVENTION
;
1845 aarch64_store_return_value (valtype
, regcache
, writebuf
);
1848 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
1851 debug_printf ("return value in registers\n");
1853 return RETURN_VALUE_REGISTER_CONVENTION
;
1856 /* Implement the "get_longjmp_target" gdbarch method. */
1859 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
1862 gdb_byte buf
[X_REGISTER_SIZE
];
1863 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1864 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1865 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1867 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
1869 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
1873 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
1877 /* Implement the "gen_return_address" gdbarch method. */
1880 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
1881 struct agent_expr
*ax
, struct axs_value
*value
,
1884 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
1885 value
->kind
= axs_lvalue_register
;
1886 value
->u
.reg
= AARCH64_LR_REGNUM
;
1890 /* Return the pseudo register name corresponding to register regnum. */
1893 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
1895 static const char *const q_name
[] =
1897 "q0", "q1", "q2", "q3",
1898 "q4", "q5", "q6", "q7",
1899 "q8", "q9", "q10", "q11",
1900 "q12", "q13", "q14", "q15",
1901 "q16", "q17", "q18", "q19",
1902 "q20", "q21", "q22", "q23",
1903 "q24", "q25", "q26", "q27",
1904 "q28", "q29", "q30", "q31",
1907 static const char *const d_name
[] =
1909 "d0", "d1", "d2", "d3",
1910 "d4", "d5", "d6", "d7",
1911 "d8", "d9", "d10", "d11",
1912 "d12", "d13", "d14", "d15",
1913 "d16", "d17", "d18", "d19",
1914 "d20", "d21", "d22", "d23",
1915 "d24", "d25", "d26", "d27",
1916 "d28", "d29", "d30", "d31",
1919 static const char *const s_name
[] =
1921 "s0", "s1", "s2", "s3",
1922 "s4", "s5", "s6", "s7",
1923 "s8", "s9", "s10", "s11",
1924 "s12", "s13", "s14", "s15",
1925 "s16", "s17", "s18", "s19",
1926 "s20", "s21", "s22", "s23",
1927 "s24", "s25", "s26", "s27",
1928 "s28", "s29", "s30", "s31",
1931 static const char *const h_name
[] =
1933 "h0", "h1", "h2", "h3",
1934 "h4", "h5", "h6", "h7",
1935 "h8", "h9", "h10", "h11",
1936 "h12", "h13", "h14", "h15",
1937 "h16", "h17", "h18", "h19",
1938 "h20", "h21", "h22", "h23",
1939 "h24", "h25", "h26", "h27",
1940 "h28", "h29", "h30", "h31",
1943 static const char *const b_name
[] =
1945 "b0", "b1", "b2", "b3",
1946 "b4", "b5", "b6", "b7",
1947 "b8", "b9", "b10", "b11",
1948 "b12", "b13", "b14", "b15",
1949 "b16", "b17", "b18", "b19",
1950 "b20", "b21", "b22", "b23",
1951 "b24", "b25", "b26", "b27",
1952 "b28", "b29", "b30", "b31",
1955 regnum
-= gdbarch_num_regs (gdbarch
);
1957 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
1958 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
1960 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
1961 return d_name
[regnum
- AARCH64_D0_REGNUM
];
1963 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
1964 return s_name
[regnum
- AARCH64_S0_REGNUM
];
1966 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
1967 return h_name
[regnum
- AARCH64_H0_REGNUM
];
1969 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
1970 return b_name
[regnum
- AARCH64_B0_REGNUM
];
1972 internal_error (__FILE__
, __LINE__
,
1973 _("aarch64_pseudo_register_name: bad register number %d"),
1977 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
1979 static struct type
*
1980 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
1982 regnum
-= gdbarch_num_regs (gdbarch
);
1984 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
1985 return aarch64_vnq_type (gdbarch
);
1987 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
1988 return aarch64_vnd_type (gdbarch
);
1990 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
1991 return aarch64_vns_type (gdbarch
);
1993 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
1994 return aarch64_vnh_type (gdbarch
);
1996 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
1997 return aarch64_vnb_type (gdbarch
);
1999 internal_error (__FILE__
, __LINE__
,
2000 _("aarch64_pseudo_register_type: bad register number %d"),
2004 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2007 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2008 struct reggroup
*group
)
2010 regnum
-= gdbarch_num_regs (gdbarch
);
2012 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2013 return group
== all_reggroup
|| group
== vector_reggroup
;
2014 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2015 return (group
== all_reggroup
|| group
== vector_reggroup
2016 || group
== float_reggroup
);
2017 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2018 return (group
== all_reggroup
|| group
== vector_reggroup
2019 || group
== float_reggroup
);
2020 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2021 return group
== all_reggroup
|| group
== vector_reggroup
;
2022 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2023 return group
== all_reggroup
|| group
== vector_reggroup
;
2025 return group
== all_reggroup
;
2028 /* Implement the "pseudo_register_read_value" gdbarch method. */
2030 static struct value
*
2031 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2032 struct regcache
*regcache
,
2035 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2036 struct value
*result_value
;
2039 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2040 VALUE_LVAL (result_value
) = lval_register
;
2041 VALUE_REGNUM (result_value
) = regnum
;
2042 buf
= value_contents_raw (result_value
);
2044 regnum
-= gdbarch_num_regs (gdbarch
);
2046 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2048 enum register_status status
;
2051 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2052 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2053 if (status
!= REG_VALID
)
2054 mark_value_bytes_unavailable (result_value
, 0,
2055 TYPE_LENGTH (value_type (result_value
)));
2057 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2058 return result_value
;
2061 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2063 enum register_status status
;
2066 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2067 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2068 if (status
!= REG_VALID
)
2069 mark_value_bytes_unavailable (result_value
, 0,
2070 TYPE_LENGTH (value_type (result_value
)));
2072 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2073 return result_value
;
2076 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2078 enum register_status status
;
2081 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2082 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2083 if (status
!= REG_VALID
)
2084 mark_value_bytes_unavailable (result_value
, 0,
2085 TYPE_LENGTH (value_type (result_value
)));
2087 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2088 return result_value
;
2091 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2093 enum register_status status
;
2096 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2097 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2098 if (status
!= REG_VALID
)
2099 mark_value_bytes_unavailable (result_value
, 0,
2100 TYPE_LENGTH (value_type (result_value
)));
2102 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2103 return result_value
;
2106 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2108 enum register_status status
;
2111 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2112 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2113 if (status
!= REG_VALID
)
2114 mark_value_bytes_unavailable (result_value
, 0,
2115 TYPE_LENGTH (value_type (result_value
)));
2117 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2118 return result_value
;
2121 gdb_assert_not_reached ("regnum out of bound");
2124 /* Implement the "pseudo_register_write" gdbarch method. */
2127 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2128 int regnum
, const gdb_byte
*buf
)
2130 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2132 /* Ensure the register buffer is zero, we want gdb writes of the
2133 various 'scalar' pseudo registers to behavior like architectural
2134 writes, register width bytes are written the remainder are set to
2136 memset (reg_buf
, 0, sizeof (reg_buf
));
2138 regnum
-= gdbarch_num_regs (gdbarch
);
2140 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2142 /* pseudo Q registers */
2145 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2146 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2147 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2151 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2153 /* pseudo D registers */
2156 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2157 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2158 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2162 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2166 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2167 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2168 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2172 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2174 /* pseudo H registers */
2177 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2178 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2179 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2183 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2185 /* pseudo B registers */
2188 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2189 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2190 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2194 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.

   BATON is a pointer to the GDB register number registered for the
   alias; return that register's value in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = (const int *) baton;

  return value_of_register (*reg_p, frame);
}
2208 /* Implement the "software_single_step" gdbarch method, needed to
2209 single step through atomic sequences on AArch64. */
2212 aarch64_software_single_step (struct frame_info
*frame
)
2214 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2215 struct address_space
*aspace
= get_frame_address_space (frame
);
2216 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2217 const int insn_size
= 4;
2218 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2219 CORE_ADDR pc
= get_frame_pc (frame
);
2220 CORE_ADDR breaks
[2] = { -1, -1 };
2222 CORE_ADDR closing_insn
= 0;
2223 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2224 byte_order_for_code
);
2227 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2228 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2231 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2234 /* Look for a Load Exclusive instruction which begins the sequence. */
2235 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2238 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2241 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2242 byte_order_for_code
);
2244 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2246 /* Check if the instruction is a conditional branch. */
2247 if (inst
.opcode
->iclass
== condbranch
)
2249 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2251 if (bc_insn_count
>= 1)
2254 /* It is, so we'll try to set a breakpoint at the destination. */
2255 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2261 /* Look for the Store Exclusive which closes the atomic sequence. */
2262 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2269 /* We didn't find a closing Store Exclusive instruction, fall back. */
2273 /* Insert breakpoint after the end of the atomic sequence. */
2274 breaks
[0] = loc
+ insn_size
;
2276 /* Check for duplicated breakpoints, and also check that the second
2277 breakpoint is not within the atomic sequence. */
2279 && (breaks
[1] == breaks
[0]
2280 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2281 last_breakpoint
= 0;
2283 /* Insert the breakpoint at the end of the sequence, and one at the
2284 destination of the conditional branch, if it exists. */
2285 for (index
= 0; index
<= last_breakpoint
; index
++)
2286 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
struct displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  int cond;

  /* PC adjustment offset after displaced stepping.  */
  int32_t pc_adjust;
};
2301 /* Data when visiting instructions for displaced stepping. */
2303 struct aarch64_displaced_step_data
2305 struct aarch64_insn_data base
;
2307 /* The address where the instruction will be executed at. */
2309 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2310 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2311 /* Number of instructions in INSN_BUF. */
2312 unsigned insn_count
;
2313 /* Registers when doing displaced stepping. */
2314 struct regcache
*regs
;
2316 struct displaced_step_closure
*dsc
;
2319 /* Implementation of aarch64_insn_visitor method "b". */
2322 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2323 struct aarch64_insn_data
*data
)
2325 struct aarch64_displaced_step_data
*dsd
2326 = (struct aarch64_displaced_step_data
*) data
;
2327 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2329 if (can_encode_int32 (new_offset
, 28))
2331 /* Emit B rather than BL, because executing BL on a new address
2332 will get the wrong address into LR. In order to avoid this,
2333 we emit B, and update LR if the instruction is BL. */
2334 emit_b (dsd
->insn_buf
, 0, new_offset
);
2340 emit_nop (dsd
->insn_buf
);
2342 dsd
->dsc
->pc_adjust
= offset
;
2348 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2349 data
->insn_addr
+ 4);
2353 /* Implementation of aarch64_insn_visitor method "b_cond". */
2356 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2357 struct aarch64_insn_data
*data
)
2359 struct aarch64_displaced_step_data
*dsd
2360 = (struct aarch64_displaced_step_data
*) data
;
2362 /* GDB has to fix up PC after displaced step this instruction
2363 differently according to the condition is true or false. Instead
2364 of checking COND against conditional flags, we can use
2365 the following instructions, and GDB can tell how to fix up PC
2366 according to the PC value.
2368 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2374 emit_bcond (dsd
->insn_buf
, cond
, 8);
2376 dsd
->dsc
->pc_adjust
= offset
;
2377 dsd
->insn_count
= 1;
2380 /* Dynamically allocate a new register. If we know the register
2381 statically, we should make it a global as above instead of using this
2384 static struct aarch64_register
2385 aarch64_register (unsigned num
, int is64
)
2387 return (struct aarch64_register
) { num
, is64
};
2390 /* Implementation of aarch64_insn_visitor method "cb". */
2393 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2394 const unsigned rn
, int is64
,
2395 struct aarch64_insn_data
*data
)
2397 struct aarch64_displaced_step_data
*dsd
2398 = (struct aarch64_displaced_step_data
*) data
;
2400 /* The offset is out of range for a compare and branch
2401 instruction. We can use the following instructions instead:
2403 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2408 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2409 dsd
->insn_count
= 1;
2411 dsd
->dsc
->pc_adjust
= offset
;
2414 /* Implementation of aarch64_insn_visitor method "tb". */
2417 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2418 const unsigned rt
, unsigned bit
,
2419 struct aarch64_insn_data
*data
)
2421 struct aarch64_displaced_step_data
*dsd
2422 = (struct aarch64_displaced_step_data
*) data
;
2424 /* The offset is out of range for a test bit and branch
2425 instruction We can use the following instructions instead:
2427 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2433 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2434 dsd
->insn_count
= 1;
2436 dsd
->dsc
->pc_adjust
= offset
;
2439 /* Implementation of aarch64_insn_visitor method "adr". */
2442 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2443 const int is_adrp
, struct aarch64_insn_data
*data
)
2445 struct aarch64_displaced_step_data
*dsd
2446 = (struct aarch64_displaced_step_data
*) data
;
2447 /* We know exactly the address the ADR{P,} instruction will compute.
2448 We can just write it to the destination register. */
2449 CORE_ADDR address
= data
->insn_addr
+ offset
;
2453 /* Clear the lower 12 bits of the offset to get the 4K page. */
2454 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2458 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2461 dsd
->dsc
->pc_adjust
= 4;
2462 emit_nop (dsd
->insn_buf
);
2463 dsd
->insn_count
= 1;
2466 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2469 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2470 const unsigned rt
, const int is64
,
2471 struct aarch64_insn_data
*data
)
2473 struct aarch64_displaced_step_data
*dsd
2474 = (struct aarch64_displaced_step_data
*) data
;
2475 CORE_ADDR address
= data
->insn_addr
+ offset
;
2476 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2478 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2482 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2483 aarch64_register (rt
, 1), zero
);
2485 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2486 aarch64_register (rt
, 1), zero
);
2488 dsd
->dsc
->pc_adjust
= 4;
2491 /* Implementation of aarch64_insn_visitor method "others". */
2494 aarch64_displaced_step_others (const uint32_t insn
,
2495 struct aarch64_insn_data
*data
)
2497 struct aarch64_displaced_step_data
*dsd
2498 = (struct aarch64_displaced_step_data
*) data
;
2500 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2501 dsd
->insn_count
= 1;
2503 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2506 dsd
->dsc
->pc_adjust
= 0;
2509 dsd
->dsc
->pc_adjust
= 4;
2512 static const struct aarch64_insn_visitor visitor
=
2514 aarch64_displaced_step_b
,
2515 aarch64_displaced_step_b_cond
,
2516 aarch64_displaced_step_cb
,
2517 aarch64_displaced_step_tb
,
2518 aarch64_displaced_step_adr
,
2519 aarch64_displaced_step_ldr_literal
,
2520 aarch64_displaced_step_others
,
2523 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2525 struct displaced_step_closure
*
2526 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2527 CORE_ADDR from
, CORE_ADDR to
,
2528 struct regcache
*regs
)
2530 struct displaced_step_closure
*dsc
= NULL
;
2531 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2532 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2533 struct aarch64_displaced_step_data dsd
;
2536 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2539 /* Look for a Load Exclusive instruction which begins the sequence. */
2540 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2542 /* We can't displaced step atomic sequences. */
2546 dsc
= XCNEW (struct displaced_step_closure
);
2547 dsd
.base
.insn_addr
= from
;
2552 aarch64_relocate_instruction (insn
, &visitor
,
2553 (struct aarch64_insn_data
*) &dsd
);
2554 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2556 if (dsd
.insn_count
!= 0)
2560 /* Instruction can be relocated to scratch pad. Copy
2561 relocated instruction(s) there. */
2562 for (i
= 0; i
< dsd
.insn_count
; i
++)
2564 if (debug_displaced
)
2566 debug_printf ("displaced: writing insn ");
2567 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2568 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2570 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2571 (ULONGEST
) dsd
.insn_buf
[i
]);
2583 /* Implement the "displaced_step_fixup" gdbarch method. */
2586 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2587 struct displaced_step_closure
*dsc
,
2588 CORE_ADDR from
, CORE_ADDR to
,
2589 struct regcache
*regs
)
2595 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2598 /* Condition is true. */
2600 else if (pc
- to
== 4)
2602 /* Condition is false. */
2606 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2609 if (dsc
->pc_adjust
!= 0)
2611 if (debug_displaced
)
2613 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2614 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2616 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2617 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   AArch64 always uses hardware single-step to execute the relocated
   instruction, so unconditionally answer "yes".  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2630 /* Initialize the current architecture based on INFO. If possible,
2631 re-use an architecture from ARCHES, which is a list of
2632 architectures already created during this debugging session.
2634 Called e.g. at program startup, when reading a core file, and when
2635 reading a binary file. */
2637 static struct gdbarch
*
2638 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2640 struct gdbarch_tdep
*tdep
;
2641 struct gdbarch
*gdbarch
;
2642 struct gdbarch_list
*best_arch
;
2643 struct tdesc_arch_data
*tdesc_data
= NULL
;
2644 const struct target_desc
*tdesc
= info
.target_desc
;
2647 const struct tdesc_feature
*feature
;
2649 int num_pseudo_regs
= 0;
2651 /* Ensure we always have a target descriptor. */
2652 if (!tdesc_has_registers (tdesc
))
2653 tdesc
= tdesc_aarch64
;
2657 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2659 if (feature
== NULL
)
2662 tdesc_data
= tdesc_data_alloc ();
2664 /* Validate the descriptor provides the mandatory core R registers
2665 and allocate their numbers. */
2666 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2668 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2669 aarch64_r_register_names
[i
]);
2671 num_regs
= AARCH64_X0_REGNUM
+ i
;
2673 /* Look for the V registers. */
2674 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2677 /* Validate the descriptor provides the mandatory V registers
2678 and allocate their numbers. */
2679 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2681 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2682 aarch64_v_register_names
[i
]);
2684 num_regs
= AARCH64_V0_REGNUM
+ i
;
2686 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2687 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2688 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2689 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2690 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2695 tdesc_data_cleanup (tdesc_data
);
2699 /* AArch64 code is always little-endian. */
2700 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2702 /* If there is already a candidate, use it. */
2703 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2705 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2707 /* Found a match. */
2711 if (best_arch
!= NULL
)
2713 if (tdesc_data
!= NULL
)
2714 tdesc_data_cleanup (tdesc_data
);
2715 return best_arch
->gdbarch
;
2718 tdep
= XCNEW (struct gdbarch_tdep
);
2719 gdbarch
= gdbarch_alloc (&info
, tdep
);
2721 /* This should be low enough for everything. */
2722 tdep
->lowest_pc
= 0x20;
2723 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2724 tdep
->jb_elt_size
= 8;
2726 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2727 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2729 /* Frame handling. */
2730 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2731 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2732 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2734 /* Advance PC across function entry code. */
2735 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2737 /* The stack grows downward. */
2738 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2740 /* Breakpoint manipulation. */
2741 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2742 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2743 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2745 /* Information about registers, etc. */
2746 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2747 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2748 set_gdbarch_num_regs (gdbarch
, num_regs
);
2750 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2751 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2752 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2753 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2754 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2755 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2756 aarch64_pseudo_register_reggroup_p
);
2759 set_gdbarch_short_bit (gdbarch
, 16);
2760 set_gdbarch_int_bit (gdbarch
, 32);
2761 set_gdbarch_float_bit (gdbarch
, 32);
2762 set_gdbarch_double_bit (gdbarch
, 64);
2763 set_gdbarch_long_double_bit (gdbarch
, 128);
2764 set_gdbarch_long_bit (gdbarch
, 64);
2765 set_gdbarch_long_long_bit (gdbarch
, 64);
2766 set_gdbarch_ptr_bit (gdbarch
, 64);
2767 set_gdbarch_char_signed (gdbarch
, 0);
2768 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2769 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2770 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2772 /* Internal <-> external register number maps. */
2773 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2775 /* Returning results. */
2776 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2779 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2781 /* Virtual tables. */
2782 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2784 /* Hook in the ABI-specific overrides, if they have been registered. */
2785 info
.target_desc
= tdesc
;
2786 info
.tdep_info
= (void *) tdesc_data
;
2787 gdbarch_init_osabi (info
, gdbarch
);
2789 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2791 /* Add some default predicates. */
2792 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2793 dwarf2_append_unwinders (gdbarch
);
2794 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2796 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2798 /* Now we have tuned the configuration, set a few final things,
2799 based on what the OS ABI has told us. */
2801 if (tdep
->jb_pc
>= 0)
2802 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2804 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
2806 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2808 /* Add standard register aliases. */
2809 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2810 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2811 value_of_aarch64_user_reg
,
2812 &aarch64_register_aliases
[i
].regnum
);
2818 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2820 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2825 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2826 paddress (gdbarch
, tdep
->lowest_pc
));
2829 /* Suppress warning from -Wmissing-prototypes. */
2830 extern initialize_file_ftype _initialize_aarch64_tdep
;
2833 _initialize_aarch64_tdep (void)
2835 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2838 initialize_tdesc_aarch64 ();
2840 /* Debug this file's internals. */
2841 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2842 Set AArch64 debugging."), _("\
2843 Show AArch64 debugging."), _("\
2844 When on, AArch64 specific debugging is enabled."),
2847 &setdebuglist
, &showdebuglist
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS and copy LENGTH register numbers from RECORD_BUF into it.
   No-op when LENGTH is zero.  Wrapped in do/while (0) so the macro is
   safe as a single statement.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS and copy LENGTH (len, addr) pairs from RECORD_BUF into
   it.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
2877 /* AArch64 record/replay structures and enumerations. */
2879 struct aarch64_mem_r
2881 uint64_t len
; /* Record length. */
2882 uint64_t addr
; /* Memory address. */
2885 enum aarch64_record_result
2887 AARCH64_RECORD_SUCCESS
,
2888 AARCH64_RECORD_FAILURE
,
2889 AARCH64_RECORD_UNSUPPORTED
,
2890 AARCH64_RECORD_UNKNOWN
2893 typedef struct insn_decode_record_t
2895 struct gdbarch
*gdbarch
;
2896 struct regcache
*regcache
;
2897 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2898 uint32_t aarch64_insn
; /* Insn to be recorded. */
2899 uint32_t mem_rec_count
; /* Count of memory records. */
2900 uint32_t reg_rec_count
; /* Count of register records. */
2901 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2902 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2903 } insn_decode_record
;
2905 /* Record handler for data processing - register instructions. */
2908 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2910 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2911 uint32_t record_buf
[4];
2913 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2914 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2915 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2917 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2921 /* Logical (shifted register). */
2922 if (insn_bits24_27
== 0x0a)
2923 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2925 else if (insn_bits24_27
== 0x0b)
2926 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2928 return AARCH64_RECORD_UNKNOWN
;
2930 record_buf
[0] = reg_rd
;
2931 aarch64_insn_r
->reg_rec_count
= 1;
2933 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2937 if (insn_bits24_27
== 0x0b)
2939 /* Data-processing (3 source). */
2940 record_buf
[0] = reg_rd
;
2941 aarch64_insn_r
->reg_rec_count
= 1;
2943 else if (insn_bits24_27
== 0x0a)
2945 if (insn_bits21_23
== 0x00)
2947 /* Add/subtract (with carry). */
2948 record_buf
[0] = reg_rd
;
2949 aarch64_insn_r
->reg_rec_count
= 1;
2950 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
2952 record_buf
[1] = AARCH64_CPSR_REGNUM
;
2953 aarch64_insn_r
->reg_rec_count
= 2;
2956 else if (insn_bits21_23
== 0x02)
2958 /* Conditional compare (register) and conditional compare
2959 (immediate) instructions. */
2960 record_buf
[0] = AARCH64_CPSR_REGNUM
;
2961 aarch64_insn_r
->reg_rec_count
= 1;
2963 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
2965 /* CConditional select. */
2966 /* Data-processing (2 source). */
2967 /* Data-processing (1 source). */
2968 record_buf
[0] = reg_rd
;
2969 aarch64_insn_r
->reg_rec_count
= 1;
2972 return AARCH64_RECORD_UNKNOWN
;
2976 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
2978 return AARCH64_RECORD_SUCCESS
;
2981 /* Record handler for data processing - immediate instructions. */
2984 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
2986 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
2987 uint32_t record_buf
[4];
2989 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2990 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
2991 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2993 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
2994 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
2995 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
2997 record_buf
[0] = reg_rd
;
2998 aarch64_insn_r
->reg_rec_count
= 1;
3000 else if (insn_bits24_27
== 0x01)
3002 /* Add/Subtract (immediate). */
3003 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3004 record_buf
[0] = reg_rd
;
3005 aarch64_insn_r
->reg_rec_count
= 1;
3007 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3009 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3011 /* Logical (immediate). */
3012 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3013 record_buf
[0] = reg_rd
;
3014 aarch64_insn_r
->reg_rec_count
= 1;
3016 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3019 return AARCH64_RECORD_UNKNOWN
;
3021 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3023 return AARCH64_RECORD_SUCCESS
;
3026 /* Record handler for branch, exception generation and system instructions. */
3029 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3031 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3032 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3033 uint32_t record_buf
[4];
3035 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3036 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3037 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3039 if (insn_bits28_31
== 0x0d)
3041 /* Exception generation instructions. */
3042 if (insn_bits24_27
== 0x04)
3044 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3045 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3046 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3048 ULONGEST svc_number
;
3050 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3052 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3056 return AARCH64_RECORD_UNSUPPORTED
;
3058 /* System instructions. */
3059 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3061 uint32_t reg_rt
, reg_crn
;
3063 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3064 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3066 /* Record rt in case of sysl and mrs instructions. */
3067 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3069 record_buf
[0] = reg_rt
;
3070 aarch64_insn_r
->reg_rec_count
= 1;
3072 /* Record cpsr for hint and msr(immediate) instructions. */
3073 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3075 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3076 aarch64_insn_r
->reg_rec_count
= 1;
3079 /* Unconditional branch (register). */
3080 else if((insn_bits24_27
& 0x0e) == 0x06)
3082 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3083 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3084 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3087 return AARCH64_RECORD_UNKNOWN
;
3089 /* Unconditional branch (immediate). */
3090 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3092 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3093 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3094 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3097 /* Compare & branch (immediate), Test & branch (immediate) and
3098 Conditional branch (immediate). */
3099 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3101 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3103 return AARCH64_RECORD_SUCCESS
;
3106 /* Record handler for advanced SIMD load and store instructions. */
3109 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3112 uint64_t addr_offset
= 0;
3113 uint32_t record_buf
[24];
3114 uint64_t record_buf_mem
[24];
3115 uint32_t reg_rn
, reg_rt
;
3116 uint32_t reg_index
= 0, mem_index
= 0;
3117 uint8_t opcode_bits
, size_bits
;
3119 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3120 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3121 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3122 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3123 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3126 debug_printf ("Process record: Advanced SIMD load/store\n");
3128 /* Load/store single structure. */
3129 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3131 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3132 scale
= opcode_bits
>> 2;
3133 selem
= ((opcode_bits
& 0x02) |
3134 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3138 if (size_bits
& 0x01)
3139 return AARCH64_RECORD_UNKNOWN
;
3142 if ((size_bits
>> 1) & 0x01)
3143 return AARCH64_RECORD_UNKNOWN
;
3144 if (size_bits
& 0x01)
3146 if (!((opcode_bits
>> 1) & 0x01))
3149 return AARCH64_RECORD_UNKNOWN
;
3153 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3160 return AARCH64_RECORD_UNKNOWN
;
3166 for (sindex
= 0; sindex
< selem
; sindex
++)
3168 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3169 reg_rt
= (reg_rt
+ 1) % 32;
3173 for (sindex
= 0; sindex
< selem
; sindex
++)
3175 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3176 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3179 record_buf_mem
[mem_index
++] = esize
/ 8;
3180 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3182 addr_offset
= addr_offset
+ (esize
/ 8);
3183 reg_rt
= (reg_rt
+ 1) % 32;
3187 /* Load/store multiple structure. */
3190 uint8_t selem
, esize
, rpt
, elements
;
3191 uint8_t eindex
, rindex
;
3193 esize
= 8 << size_bits
;
3194 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3195 elements
= 128 / esize
;
3197 elements
= 64 / esize
;
3199 switch (opcode_bits
)
3201 /*LD/ST4 (4 Registers). */
3206 /*LD/ST1 (4 Registers). */
3211 /*LD/ST3 (3 Registers). */
3216 /*LD/ST1 (3 Registers). */
3221 /*LD/ST1 (1 Register). */
3226 /*LD/ST2 (2 Registers). */
3231 /*LD/ST1 (2 Registers). */
3237 return AARCH64_RECORD_UNSUPPORTED
;
3240 for (rindex
= 0; rindex
< rpt
; rindex
++)
3241 for (eindex
= 0; eindex
< elements
; eindex
++)
3243 uint8_t reg_tt
, sindex
;
3244 reg_tt
= (reg_rt
+ rindex
) % 32;
3245 for (sindex
= 0; sindex
< selem
; sindex
++)
3247 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3248 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3251 record_buf_mem
[mem_index
++] = esize
/ 8;
3252 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3254 addr_offset
= addr_offset
+ (esize
/ 8);
3255 reg_tt
= (reg_tt
+ 1) % 32;
3260 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3261 record_buf
[reg_index
++] = reg_rn
;
3263 aarch64_insn_r
->reg_rec_count
= reg_index
;
3264 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3265 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3267 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3269 return AARCH64_RECORD_SUCCESS
;
3272 /* Record handler for load and store instructions. */
3275 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3277 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3278 uint8_t insn_bit23
, insn_bit21
;
3279 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3280 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3281 uint64_t datasize
, offset
;
3282 uint32_t record_buf
[8];
3283 uint64_t record_buf_mem
[8];
3286 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3287 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3288 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3289 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3290 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3291 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3292 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3293 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3294 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3295 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3296 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3298 /* Load/store exclusive. */
3299 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3302 debug_printf ("Process record: load/store exclusive\n");
3306 record_buf
[0] = reg_rt
;
3307 aarch64_insn_r
->reg_rec_count
= 1;
3310 record_buf
[1] = reg_rt2
;
3311 aarch64_insn_r
->reg_rec_count
= 2;
3317 datasize
= (8 << size_bits
) * 2;
3319 datasize
= (8 << size_bits
);
3320 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3322 record_buf_mem
[0] = datasize
/ 8;
3323 record_buf_mem
[1] = address
;
3324 aarch64_insn_r
->mem_rec_count
= 1;
3327 /* Save register rs. */
3328 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3329 aarch64_insn_r
->reg_rec_count
= 1;
3333 /* Load register (literal) instructions decoding. */
3334 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3337 debug_printf ("Process record: load register (literal)\n");
3339 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3341 record_buf
[0] = reg_rt
;
3342 aarch64_insn_r
->reg_rec_count
= 1;
3344 /* All types of load/store pair instructions decoding. */
3345 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3348 debug_printf ("Process record: load/store pair\n");
3354 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3355 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3359 record_buf
[0] = reg_rt
;
3360 record_buf
[1] = reg_rt2
;
3362 aarch64_insn_r
->reg_rec_count
= 2;
3367 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3369 size_bits
= size_bits
>> 1;
3370 datasize
= 8 << (2 + size_bits
);
3371 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3372 offset
= offset
<< (2 + size_bits
);
3373 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3375 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3377 if (imm7_off
& 0x40)
3378 address
= address
- offset
;
3380 address
= address
+ offset
;
3383 record_buf_mem
[0] = datasize
/ 8;
3384 record_buf_mem
[1] = address
;
3385 record_buf_mem
[2] = datasize
/ 8;
3386 record_buf_mem
[3] = address
+ (datasize
/ 8);
3387 aarch64_insn_r
->mem_rec_count
= 2;
3389 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3390 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3392 /* Load/store register (unsigned immediate) instructions. */
3393 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3395 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3402 if (size_bits
!= 0x03)
3405 return AARCH64_RECORD_UNKNOWN
;
3409 debug_printf ("Process record: load/store (unsigned immediate):"
3410 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3416 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3417 datasize
= 8 << size_bits
;
3418 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3420 offset
= offset
<< size_bits
;
3421 address
= address
+ offset
;
3423 record_buf_mem
[0] = datasize
>> 3;
3424 record_buf_mem
[1] = address
;
3425 aarch64_insn_r
->mem_rec_count
= 1;
3430 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3432 record_buf
[0] = reg_rt
;
3433 aarch64_insn_r
->reg_rec_count
= 1;
3436 /* Load/store register (register offset) instructions. */
3437 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3438 && insn_bits10_11
== 0x02 && insn_bit21
)
3441 debug_printf ("Process record: load/store (register offset)\n");
3442 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3449 if (size_bits
!= 0x03)
3452 return AARCH64_RECORD_UNKNOWN
;
3456 ULONGEST reg_rm_val
;
3458 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3459 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3460 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3461 offset
= reg_rm_val
<< size_bits
;
3463 offset
= reg_rm_val
;
3464 datasize
= 8 << size_bits
;
3465 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3467 address
= address
+ offset
;
3468 record_buf_mem
[0] = datasize
>> 3;
3469 record_buf_mem
[1] = address
;
3470 aarch64_insn_r
->mem_rec_count
= 1;
3475 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3477 record_buf
[0] = reg_rt
;
3478 aarch64_insn_r
->reg_rec_count
= 1;
3481 /* Load/store register (immediate and unprivileged) instructions. */
3482 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3487 debug_printf ("Process record: load/store "
3488 "(immediate and unprivileged)\n");
3490 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3497 if (size_bits
!= 0x03)
3500 return AARCH64_RECORD_UNKNOWN
;
3505 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3506 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3507 datasize
= 8 << size_bits
;
3508 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3510 if (insn_bits10_11
!= 0x01)
3512 if (imm9_off
& 0x0100)
3513 address
= address
- offset
;
3515 address
= address
+ offset
;
3517 record_buf_mem
[0] = datasize
>> 3;
3518 record_buf_mem
[1] = address
;
3519 aarch64_insn_r
->mem_rec_count
= 1;
3524 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3526 record_buf
[0] = reg_rt
;
3527 aarch64_insn_r
->reg_rec_count
= 1;
3529 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3530 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3532 /* Advanced SIMD load/store instructions. */
3534 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3536 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3538 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3540 return AARCH64_RECORD_SUCCESS
;
3543 /* Record handler for data processing SIMD and floating point instructions. */
3546 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3548 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3549 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3550 uint8_t insn_bits11_14
;
3551 uint32_t record_buf
[2];
3553 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3554 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3555 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3556 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3557 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3558 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3559 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3560 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3561 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3564 debug_printf ("Process record: data processing SIMD/FP: ");
3566 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3568 /* Floating point - fixed point conversion instructions. */
3572 debug_printf ("FP - fixed point conversion");
3574 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3575 record_buf
[0] = reg_rd
;
3577 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3579 /* Floating point - conditional compare instructions. */
3580 else if (insn_bits10_11
== 0x01)
3583 debug_printf ("FP - conditional compare");
3585 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3587 /* Floating point - data processing (2-source) and
3588 conditional select instructions. */
3589 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3592 debug_printf ("FP - DP (2-source)");
3594 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3596 else if (insn_bits10_11
== 0x00)
3598 /* Floating point - immediate instructions. */
3599 if ((insn_bits12_15
& 0x01) == 0x01
3600 || (insn_bits12_15
& 0x07) == 0x04)
3603 debug_printf ("FP - immediate");
3604 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3606 /* Floating point - compare instructions. */
3607 else if ((insn_bits12_15
& 0x03) == 0x02)
3610 debug_printf ("FP - immediate");
3611 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3613 /* Floating point - integer conversions instructions. */
3614 else if (insn_bits12_15
== 0x00)
3616 /* Convert float to integer instruction. */
3617 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3620 debug_printf ("float to int conversion");
3622 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3624 /* Convert integer to float instruction. */
3625 else if ((opcode
>> 1) == 0x01 && !rmode
)
3628 debug_printf ("int to float conversion");
3630 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3632 /* Move float to integer instruction. */
3633 else if ((opcode
>> 1) == 0x03)
3636 debug_printf ("move float to int");
3638 if (!(opcode
& 0x01))
3639 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3641 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3644 return AARCH64_RECORD_UNKNOWN
;
3647 return AARCH64_RECORD_UNKNOWN
;
3650 return AARCH64_RECORD_UNKNOWN
;
3652 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3655 debug_printf ("SIMD copy");
3657 /* Advanced SIMD copy instructions. */
3658 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3659 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3660 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3662 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3663 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3665 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3668 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3670 /* All remaining floating point or advanced SIMD instructions. */
3674 debug_printf ("all remain");
3676 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3680 debug_printf ("\n");
3682 aarch64_insn_r
->reg_rec_count
++;
3683 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3684 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3686 return AARCH64_RECORD_SUCCESS
;
3689 /* Decodes insns type and invokes its record handler. */
3692 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3694 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3696 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3697 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3698 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3699 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3701 /* Data processing - immediate instructions. */
3702 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3703 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3705 /* Branch, exception generation and system instructions. */
3706 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3707 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3709 /* Load and store instructions. */
3710 if (!ins_bit25
&& ins_bit27
)
3711 return aarch64_record_load_store (aarch64_insn_r
);
3713 /* Data processing - register instructions. */
3714 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3715 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3717 /* Data processing - SIMD and floating point instructions. */
3718 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3719 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3721 return AARCH64_RECORD_UNSUPPORTED
;
3724 /* Cleans up local record registers and memory allocations. */
3727 deallocate_reg_mem (insn_decode_record
*record
)
3729 xfree (record
->aarch64_regs
);
3730 xfree (record
->aarch64_mems
);
3733 /* Parse the current instruction and record the values of the registers and
3734 memory that will be changed in current instruction to record_arch_list
3735 return -1 if something is wrong. */
3738 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3739 CORE_ADDR insn_addr
)
3741 uint32_t rec_no
= 0;
3742 uint8_t insn_size
= 4;
3744 gdb_byte buf
[insn_size
];
3745 insn_decode_record aarch64_record
;
3747 memset (&buf
[0], 0, insn_size
);
3748 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3749 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3750 aarch64_record
.aarch64_insn
3751 = (uint32_t) extract_unsigned_integer (&buf
[0],
3753 gdbarch_byte_order (gdbarch
));
3754 aarch64_record
.regcache
= regcache
;
3755 aarch64_record
.this_addr
= insn_addr
;
3756 aarch64_record
.gdbarch
= gdbarch
;
3758 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3759 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3761 printf_unfiltered (_("Process record does not support instruction "
3762 "0x%0x at address %s.\n"),
3763 aarch64_record
.aarch64_insn
,
3764 paddress (gdbarch
, insn_addr
));
3770 /* Record registers. */
3771 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3773 /* Always record register CPSR. */
3774 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3775 AARCH64_CPSR_REGNUM
);
3776 if (aarch64_record
.aarch64_regs
)
3777 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3778 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3779 aarch64_record
.aarch64_regs
[rec_no
]))
3782 /* Record memories. */
3783 if (aarch64_record
.aarch64_mems
)
3784 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3785 if (record_full_arch_list_add_mem
3786 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3787 aarch64_record
.aarch64_mems
[rec_no
].len
))
3790 if (record_full_arch_list_add_end ())
3794 deallocate_reg_mem (&aarch64_record
);