/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};
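
/* Putting instruction fetch behind this abstract interface lets
   aarch64_analyze_prologue run both against real target memory and
   against the canned instruction sequences used by the self tests
   further below.  */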

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
                         struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
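      /* The code mask covers the bits that carry the pointer
	 authentication signature; clearing them recovers the plain
	 return address.  */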
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
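  /* REGS models each register as an abstract (register, offset) value,
     while STACK records the prologue's stores keyed by offset from the
     entry SP; saved registers can then be located with find_reg.  */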

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }

          /* Did we move SP to FP?  */
          if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          /* If this shows up before we set the stack, keep going.  Otherwise
             stop the analysis.  */
          if (seen_stack_set)
            break;

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store
            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
             size, regs[rt]);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Ignore the instruction that allocates stack space and sets
             the SP.  */
          if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            rt += AARCH64_X_REGISTER_COUNT;

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].set_addr (offset);
    }

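  /* The D registers are pseudo registers numbered after the gdbarch's
     raw registers, hence the gdbarch_num_regs offset in the index
     below.  */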
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

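    /* The stp stores the register pair at the decremented SP, so x29 is
       saved at PREV_SP - 272 and x30 at PREV_SP - 264; the offsets here
       are still relative to PREV_SP.  */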
    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -264);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ());
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum
                                     + AARCH64_D0_REGNUM].is_realreg ());
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr () == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr () == -48);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ());
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum
                                       + AARCH64_D0_REGNUM].addr ()
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum
                                       + AARCH64_D0_REGNUM].is_realreg ());
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
        0x910003fd, /* mov x29, sp */
        0xf801c3f3, /* str x19, [sp, #28] */
        0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr () == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr () == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr () == -40);
          else
            SELF_CHECK (cache.saved_regs[i].is_realreg ());
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr ()
                      == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

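      /* Without symbol information, fall back to assuming a standard
         frame record: the saved FP and LR pair located through the
         frame pointer register.  */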
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
                                       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
      +----------+  ^
      | saved lr |  |
   +->| saved fp |--+
   |  |          |
   |  |          | <- Previous SP
   |  +----------+
   |  | saved lr |
   +--| saved fp |<- FP
      |          |
      |          |<- SP
      +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
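/* The base, locals and arguments addresses all coincide on AArch64, so
   the same callback fills all three slots.  */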
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

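/* One-byte DWARF location expressions denoting the constants 0 and 1;
   they serve as the saved-value expressions for the RA_STATE pseudo
   register below.  */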
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
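/* A BRK #imm16 encodes its 16-bit immediate in bits [20:5], so masking
   those bits out with BRK_INSN_MASK reduces every variant to
   BRK_INSN_BASE.  */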

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
        (uint32_t) extract_unsigned_integer (target_mem, insn_len,
                                             gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple choices
         of such instructions with different immediate values.  Different OS'
         may use a different variation, but they have the same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same as for a
         scalar type), but the maximum alignment is 128 bits.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || type->code () != (*fundamental_type)->code ())
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || target_type->code () != (*fundamental_type)->code ())
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (type->is_vector ())
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || type->code () != (*fundamental_type)->code ())
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < type->num_fields (); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&type->field (i)))
              continue;

            struct type *member = check_typedef (type->field (i).type ());

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs)  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

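   For example, "struct { float x, y; }" is an HFA: both members share the
   fundamental type float, so it occupies two V registers.
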
   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
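  /* Once a SIMD/FP argument no longer fits, set the NSRN to 8 (per the
     AAPCS64 allocation rules) so that no later argument is allocated to
     a V register.  */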
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes value
   is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will
   have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&arg_type->field (i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
1863 	      /* We know that we have sufficient registers available, therefore
1864 		 this will never need to fall back to the stack.  */
1865 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1866 arg))
1867 gdb_assert_not_reached ("Failed to push args");
1868 }
1869 else
1870 {
1871 info.nsrn = 8;
1872 pass_on_stack (&info, arg_type, arg);
1873 }
1874 continue;
1875 }
1876
1877 switch (arg_type->code ())
1878 {
1879 case TYPE_CODE_INT:
1880 case TYPE_CODE_BOOL:
1881 case TYPE_CODE_CHAR:
1882 case TYPE_CODE_RANGE:
1883 case TYPE_CODE_ENUM:
1884 if (len < 4)
1885 {
1886 /* Promote to 32 bit integer. */
1887 if (arg_type->is_unsigned ())
1888 arg_type = builtin_type (gdbarch)->builtin_uint32;
1889 else
1890 arg_type = builtin_type (gdbarch)->builtin_int32;
1891 arg = value_cast (arg_type, arg);
1892 }
1893 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1894 break;
1895
1896 case TYPE_CODE_STRUCT:
1897 case TYPE_CODE_ARRAY:
1898 case TYPE_CODE_UNION:
1899 if (len > 16)
1900 {
1901 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1902 invisible reference. */
1903
1904 /* Allocate aligned storage. */
1905 sp = align_down (sp - len, 16);
1906
1907 /* Write the real data into the stack. */
1908 write_memory (sp, value_contents (arg), len);
1909
1910 /* Construct the indirection. */
1911 arg_type = lookup_pointer_type (arg_type);
1912 arg = value_from_pointer (arg_type, sp);
1913 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1914 }
1915 else
1916 	    /* PCS C.15 / C.18: passed in consecutive registers or on the stack.  */
1917 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1918 break;
1919
1920 default:
1921 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1922 break;
1923 }
1924 }
1925
1926 /* Make sure stack retains 16 byte alignment. */
1927 if (info.nsaa & 15)
1928 sp -= 16 - (info.nsaa & 15);
1929
1930 while (!info.si.empty ())
1931 {
1932 const stack_item_t &si = info.si.back ();
1933
1934 sp -= si.len;
1935 if (si.data != NULL)
1936 write_memory (sp, si.data, si.len);
1937 info.si.pop_back ();
1938 }
1939
1940 /* Finally, update the SP register. */
1941 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1942
1943 return sp;
1944 }
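
/* A worked example of the classification above (an illustrative
   sketch, not normative PCS text).  For a call such as

     void f (int i, double d, struct { float a, b; } hfa,
	     struct { char c[24]; } big);

   the arguments are placed as follows: I in W0, D in V0, HFA (a
   two-member homogeneous float aggregate) in V1 and V2 with one
   member per register, and BIG (24 bytes, over the 16-byte limit)
   copied to a 16-byte-aligned stack slot with its address passed
   in X1.  */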
1945
1946 /* Implement the "frame_align" gdbarch method. */
1947
1948 static CORE_ADDR
1949 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1950 {
1951 /* Align the stack to sixteen bytes. */
1952 return sp & ~(CORE_ADDR) 15;
1953 }
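
/* For example, an SP of 0x7ffffffff9 is rounded down to
   0x7ffffffff0, while an already 16-byte-aligned SP is returned
   unchanged.  */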
1954
1955 /* Return the type for an AdvSIMD Q register.  */
1956
1957 static struct type *
1958 aarch64_vnq_type (struct gdbarch *gdbarch)
1959 {
1960 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1961
1962 if (tdep->vnq_type == NULL)
1963 {
1964 struct type *t;
1965 struct type *elem;
1966
1967 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1968 TYPE_CODE_UNION);
1969
1970 elem = builtin_type (gdbarch)->builtin_uint128;
1971 append_composite_type_field (t, "u", elem);
1972
1973 elem = builtin_type (gdbarch)->builtin_int128;
1974 append_composite_type_field (t, "s", elem);
1975
1976 tdep->vnq_type = t;
1977 }
1978
1979 return tdep->vnq_type;
1980 }
1981
1982 /* Return the type for an AdvSIMD D register.  */
1983
1984 static struct type *
1985 aarch64_vnd_type (struct gdbarch *gdbarch)
1986 {
1987 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1988
1989 if (tdep->vnd_type == NULL)
1990 {
1991 struct type *t;
1992 struct type *elem;
1993
1994 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1995 TYPE_CODE_UNION);
1996
1997 elem = builtin_type (gdbarch)->builtin_double;
1998 append_composite_type_field (t, "f", elem);
1999
2000 elem = builtin_type (gdbarch)->builtin_uint64;
2001 append_composite_type_field (t, "u", elem);
2002
2003 elem = builtin_type (gdbarch)->builtin_int64;
2004 append_composite_type_field (t, "s", elem);
2005
2006 tdep->vnd_type = t;
2007 }
2008
2009 return tdep->vnd_type;
2010 }
2011
2012 /* Return the type for an AdvSIMD S register.  */
2013
2014 static struct type *
2015 aarch64_vns_type (struct gdbarch *gdbarch)
2016 {
2017 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2018
2019 if (tdep->vns_type == NULL)
2020 {
2021 struct type *t;
2022 struct type *elem;
2023
2024 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2025 TYPE_CODE_UNION);
2026
2027 elem = builtin_type (gdbarch)->builtin_float;
2028 append_composite_type_field (t, "f", elem);
2029
2030 elem = builtin_type (gdbarch)->builtin_uint32;
2031 append_composite_type_field (t, "u", elem);
2032
2033 elem = builtin_type (gdbarch)->builtin_int32;
2034 append_composite_type_field (t, "s", elem);
2035
2036 tdep->vns_type = t;
2037 }
2038
2039 return tdep->vns_type;
2040 }
2041
2042 /* Return the type for an AdvSIMD H register.  */
2043
2044 static struct type *
2045 aarch64_vnh_type (struct gdbarch *gdbarch)
2046 {
2047 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2048
2049 if (tdep->vnh_type == NULL)
2050 {
2051 struct type *t;
2052 struct type *elem;
2053
2054 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2055 TYPE_CODE_UNION);
2056
2057 elem = builtin_type (gdbarch)->builtin_half;
2058 append_composite_type_field (t, "f", elem);
2059
2060 elem = builtin_type (gdbarch)->builtin_uint16;
2061 append_composite_type_field (t, "u", elem);
2062
2063 elem = builtin_type (gdbarch)->builtin_int16;
2064 append_composite_type_field (t, "s", elem);
2065
2066 tdep->vnh_type = t;
2067 }
2068
2069 return tdep->vnh_type;
2070 }
2071
2072 /* Return the type for an AdvSIMD B register.  */
2073
2074 static struct type *
2075 aarch64_vnb_type (struct gdbarch *gdbarch)
2076 {
2077 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2078
2079 if (tdep->vnb_type == NULL)
2080 {
2081 struct type *t;
2082 struct type *elem;
2083
2084 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2085 TYPE_CODE_UNION);
2086
2087 elem = builtin_type (gdbarch)->builtin_uint8;
2088 append_composite_type_field (t, "u", elem);
2089
2090 elem = builtin_type (gdbarch)->builtin_int8;
2091 append_composite_type_field (t, "s", elem);
2092
2093 tdep->vnb_type = t;
2094 }
2095
2096 return tdep->vnb_type;
2097 }
2098
2099 /* Return the type for an AdvSIMD V register.  */
2100
2101 static struct type *
2102 aarch64_vnv_type (struct gdbarch *gdbarch)
2103 {
2104 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2105
2106 if (tdep->vnv_type == NULL)
2107 {
2108       /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2109 	 slice from the non-pseudo vector registers.  However, NEON V registers
2110 	 are always vector registers, and must be constructed as such.  */
2111 const struct builtin_type *bt = builtin_type (gdbarch);
2112
2113 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2114 TYPE_CODE_UNION);
2115
2116 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2117 TYPE_CODE_UNION);
2118 append_composite_type_field (sub, "f",
2119 init_vector_type (bt->builtin_double, 2));
2120 append_composite_type_field (sub, "u",
2121 init_vector_type (bt->builtin_uint64, 2));
2122 append_composite_type_field (sub, "s",
2123 init_vector_type (bt->builtin_int64, 2));
2124 append_composite_type_field (t, "d", sub);
2125
2126 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2127 TYPE_CODE_UNION);
2128 append_composite_type_field (sub, "f",
2129 init_vector_type (bt->builtin_float, 4));
2130 append_composite_type_field (sub, "u",
2131 init_vector_type (bt->builtin_uint32, 4));
2132 append_composite_type_field (sub, "s",
2133 init_vector_type (bt->builtin_int32, 4));
2134 append_composite_type_field (t, "s", sub);
2135
2136 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2137 TYPE_CODE_UNION);
2138 append_composite_type_field (sub, "f",
2139 init_vector_type (bt->builtin_half, 8));
2140 append_composite_type_field (sub, "u",
2141 init_vector_type (bt->builtin_uint16, 8));
2142 append_composite_type_field (sub, "s",
2143 init_vector_type (bt->builtin_int16, 8));
2144 append_composite_type_field (t, "h", sub);
2145
2146 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2147 TYPE_CODE_UNION);
2148 append_composite_type_field (sub, "u",
2149 init_vector_type (bt->builtin_uint8, 16));
2150 append_composite_type_field (sub, "s",
2151 init_vector_type (bt->builtin_int8, 16));
2152 append_composite_type_field (t, "b", sub);
2153
2154 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2155 TYPE_CODE_UNION);
2156 append_composite_type_field (sub, "u",
2157 init_vector_type (bt->builtin_uint128, 1));
2158 append_composite_type_field (sub, "s",
2159 init_vector_type (bt->builtin_int128, 1));
2160 append_composite_type_field (t, "q", sub);
2161
2162 tdep->vnv_type = t;
2163 }
2164
2165 return tdep->vnv_type;
2166 }
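
/* With this layout the V pseudo registers (used on SVE targets) can
   be inspected per lane from the CLI, e.g. "print $v0.d.f[1]" for the
   second double lane or "print $v0.b.u[15]" for the last unsigned
   byte lane.  */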
2167
2168 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2169
2170 static int
2171 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2172 {
2173 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2174
2175 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2176 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2177
2178 if (reg == AARCH64_DWARF_SP)
2179 return AARCH64_SP_REGNUM;
2180
2181 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2182 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2183
2184 if (reg == AARCH64_DWARF_SVE_VG)
2185 return AARCH64_SVE_VG_REGNUM;
2186
2187 if (reg == AARCH64_DWARF_SVE_FFR)
2188 return AARCH64_SVE_FFR_REGNUM;
2189
2190 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2191 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2192
2193 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2194 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2195
2196 if (tdep->has_pauth ())
2197 {
2198 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2199 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2200
2201 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2202 return tdep->pauth_ra_state_regnum;
2203 }
2204
2205 return -1;
2206 }
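
/* For example, DWARF register 0 (AARCH64_DWARF_X0) maps to X0, DWARF
   31 (AARCH64_DWARF_SP) to SP and DWARF 64 (AARCH64_DWARF_V0) to V0;
   any number not covered above yields -1.  */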
2207
2208 /* Implement the "print_insn" gdbarch method. */
2209
2210 static int
2211 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2212 {
2213 info->symbols = NULL;
2214 return default_print_insn (memaddr, info);
2215 }
2216
2217 /* AArch64 BRK software debug mode instruction.
2218 Note that AArch64 code is always little-endian.
2219 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2220 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2221
2222 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2223
2224 /* Extract from an array REGS containing the (raw) register state a
2225 function return value of type TYPE, and copy that, in virtual
2226 format, into VALBUF. */
2227
2228 static void
2229 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2230 gdb_byte *valbuf)
2231 {
2232 struct gdbarch *gdbarch = regs->arch ();
2233 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2234 int elements;
2235 struct type *fundamental_type;
2236
2237 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2238 &fundamental_type))
2239 {
2240 int len = TYPE_LENGTH (fundamental_type);
2241
2242 for (int i = 0; i < elements; i++)
2243 {
2244 int regno = AARCH64_V0_REGNUM + i;
2245 /* Enough space for a full vector register. */
2246 gdb_byte buf[register_size (gdbarch, regno)];
2247 gdb_assert (len <= sizeof (buf));
2248
2249 if (aarch64_debug)
2250 {
2251 debug_printf ("read HFA or HVA return value element %d from %s\n",
2252 i + 1,
2253 gdbarch_register_name (gdbarch, regno));
2254 }
2255 regs->cooked_read (regno, buf);
2256
2257 memcpy (valbuf, buf, len);
2258 valbuf += len;
2259 }
2260 }
2261 else if (type->code () == TYPE_CODE_INT
2262 || type->code () == TYPE_CODE_CHAR
2263 || type->code () == TYPE_CODE_BOOL
2264 || type->code () == TYPE_CODE_PTR
2265 || TYPE_IS_REFERENCE (type)
2266 || type->code () == TYPE_CODE_ENUM)
2267 {
2268 /* If the type is a plain integer, then the access is
2269 	 straightforward.  Otherwise we have to play around a bit
2270 more. */
2271 int len = TYPE_LENGTH (type);
2272 int regno = AARCH64_X0_REGNUM;
2273 ULONGEST tmp;
2274
2275 while (len > 0)
2276 {
2277 /* By using store_unsigned_integer we avoid having to do
2278 anything special for small big-endian values. */
2279 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2280 store_unsigned_integer (valbuf,
2281 (len > X_REGISTER_SIZE
2282 ? X_REGISTER_SIZE : len), byte_order, tmp);
2283 len -= X_REGISTER_SIZE;
2284 valbuf += X_REGISTER_SIZE;
2285 }
2286 }
2287 else
2288 {
2289 /* For a structure or union the behaviour is as if the value had
2290 been stored to word-aligned memory and then loaded into
2291 registers with 64-bit load instruction(s). */
2292 int len = TYPE_LENGTH (type);
2293 int regno = AARCH64_X0_REGNUM;
2294 bfd_byte buf[X_REGISTER_SIZE];
2295
2296 while (len > 0)
2297 {
2298 regs->cooked_read (regno++, buf);
2299 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2300 len -= X_REGISTER_SIZE;
2301 valbuf += X_REGISTER_SIZE;
2302 }
2303 }
2304 }
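
/* For example, a function returning "struct { double x, y; }" (an
   HFA) has X fetched from V0 and Y from V1, while a 16-byte integer
   such as __int128 is reassembled from X0 and X1.  */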
2305
2306
2307 /* Will a function return an aggregate type in memory or in a
2308 register? Return 0 if an aggregate type can be returned in a
2309 register, 1 if it must be returned in memory. */
2310
2311 static int
2312 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2313 {
2314 type = check_typedef (type);
2315 int elements;
2316 struct type *fundamental_type;
2317
2318 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2319 &fundamental_type))
2320 {
2321 /* v0-v7 are used to return values and one register is allocated
2322 for one member. However, HFA or HVA has at most four members. */
2323 return 0;
2324 }
2325
2326 if (TYPE_LENGTH (type) > 16)
2327 {
2328 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2329 invisible reference. */
2330
2331 return 1;
2332 }
2333
2334 return 0;
2335 }
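
/* For example, a struct of four doubles (32 bytes) is an HFA and is
   returned in V0-V3 despite exceeding 16 bytes, whereas a struct of
   three uint64_t members (24 bytes) is not a VFP candidate and is
   returned in memory.  */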
2336
2337 /* Write into appropriate registers a function return value of type
2338 TYPE, given in virtual format. */
2339
2340 static void
2341 aarch64_store_return_value (struct type *type, struct regcache *regs,
2342 const gdb_byte *valbuf)
2343 {
2344 struct gdbarch *gdbarch = regs->arch ();
2345 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2346 int elements;
2347 struct type *fundamental_type;
2348
2349 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2350 &fundamental_type))
2351 {
2352 int len = TYPE_LENGTH (fundamental_type);
2353
2354 for (int i = 0; i < elements; i++)
2355 {
2356 int regno = AARCH64_V0_REGNUM + i;
2357 /* Enough space for a full vector register. */
2358 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2359 gdb_assert (len <= sizeof (tmpbuf));
2360
2361 if (aarch64_debug)
2362 {
2363 debug_printf ("write HFA or HVA return value element %d to %s\n",
2364 i + 1,
2365 gdbarch_register_name (gdbarch, regno));
2366 }
2367
2368 memcpy (tmpbuf, valbuf,
2369 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2370 regs->cooked_write (regno, tmpbuf);
2371 valbuf += len;
2372 }
2373 }
2374 else if (type->code () == TYPE_CODE_INT
2375 || type->code () == TYPE_CODE_CHAR
2376 || type->code () == TYPE_CODE_BOOL
2377 || type->code () == TYPE_CODE_PTR
2378 || TYPE_IS_REFERENCE (type)
2379 || type->code () == TYPE_CODE_ENUM)
2380 {
2381 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2382 {
2383 /* Values of one word or less are zero/sign-extended and
2384 	     returned in X0.  */
2385 bfd_byte tmpbuf[X_REGISTER_SIZE];
2386 LONGEST val = unpack_long (type, valbuf);
2387
2388 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2389 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2390 }
2391 else
2392 {
2393 	  /* Integral values greater than one word are stored in
2394 	     consecutive registers starting with X0.  This will always
2395 	     be a multiple of the register size.  */
2396 int len = TYPE_LENGTH (type);
2397 int regno = AARCH64_X0_REGNUM;
2398
2399 while (len > 0)
2400 {
2401 regs->cooked_write (regno++, valbuf);
2402 len -= X_REGISTER_SIZE;
2403 valbuf += X_REGISTER_SIZE;
2404 }
2405 }
2406 }
2407 else
2408 {
2409 /* For a structure or union the behaviour is as if the value had
2410 been stored to word-aligned memory and then loaded into
2411 registers with 64-bit load instruction(s). */
2412 int len = TYPE_LENGTH (type);
2413 int regno = AARCH64_X0_REGNUM;
2414 bfd_byte tmpbuf[X_REGISTER_SIZE];
2415
2416 while (len > 0)
2417 {
2418 memcpy (tmpbuf, valbuf,
2419 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2420 regs->cooked_write (regno++, tmpbuf);
2421 len -= X_REGISTER_SIZE;
2422 valbuf += X_REGISTER_SIZE;
2423 }
2424 }
2425 }
2426
2427 /* Implement the "return_value" gdbarch method. */
2428
2429 static enum return_value_convention
2430 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2431 struct type *valtype, struct regcache *regcache,
2432 gdb_byte *readbuf, const gdb_byte *writebuf)
2433 {
2434
2435 if (valtype->code () == TYPE_CODE_STRUCT
2436 || valtype->code () == TYPE_CODE_UNION
2437 || valtype->code () == TYPE_CODE_ARRAY)
2438 {
2439 if (aarch64_return_in_memory (gdbarch, valtype))
2440 {
2441 if (aarch64_debug)
2442 debug_printf ("return value in memory\n");
2443 return RETURN_VALUE_STRUCT_CONVENTION;
2444 }
2445 }
2446
2447 if (writebuf)
2448 aarch64_store_return_value (valtype, regcache, writebuf);
2449
2450 if (readbuf)
2451 aarch64_extract_return_value (valtype, regcache, readbuf);
2452
2453 if (aarch64_debug)
2454 debug_printf ("return value in registers\n");
2455
2456 return RETURN_VALUE_REGISTER_CONVENTION;
2457 }
2458
2459 /* Implement the "get_longjmp_target" gdbarch method. */
2460
2461 static int
2462 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2463 {
2464 CORE_ADDR jb_addr;
2465 gdb_byte buf[X_REGISTER_SIZE];
2466 struct gdbarch *gdbarch = get_frame_arch (frame);
2467 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2468 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2469
2470 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2471
2472 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2473 X_REGISTER_SIZE))
2474 return 0;
2475
2476 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2477 return 1;
2478 }
2479
2480 /* Implement the "gen_return_address" gdbarch method. */
2481
2482 static void
2483 aarch64_gen_return_address (struct gdbarch *gdbarch,
2484 struct agent_expr *ax, struct axs_value *value,
2485 CORE_ADDR scope)
2486 {
2487 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2488 value->kind = axs_lvalue_register;
2489 value->u.reg = AARCH64_LR_REGNUM;
2490 }
2491 \f
2492
2493 /* Return the pseudo register name corresponding to register regnum. */
2494
2495 static const char *
2496 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2497 {
2498 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2499
2500 static const char *const q_name[] =
2501 {
2502 "q0", "q1", "q2", "q3",
2503 "q4", "q5", "q6", "q7",
2504 "q8", "q9", "q10", "q11",
2505 "q12", "q13", "q14", "q15",
2506 "q16", "q17", "q18", "q19",
2507 "q20", "q21", "q22", "q23",
2508 "q24", "q25", "q26", "q27",
2509 "q28", "q29", "q30", "q31",
2510 };
2511
2512 static const char *const d_name[] =
2513 {
2514 "d0", "d1", "d2", "d3",
2515 "d4", "d5", "d6", "d7",
2516 "d8", "d9", "d10", "d11",
2517 "d12", "d13", "d14", "d15",
2518 "d16", "d17", "d18", "d19",
2519 "d20", "d21", "d22", "d23",
2520 "d24", "d25", "d26", "d27",
2521 "d28", "d29", "d30", "d31",
2522 };
2523
2524 static const char *const s_name[] =
2525 {
2526 "s0", "s1", "s2", "s3",
2527 "s4", "s5", "s6", "s7",
2528 "s8", "s9", "s10", "s11",
2529 "s12", "s13", "s14", "s15",
2530 "s16", "s17", "s18", "s19",
2531 "s20", "s21", "s22", "s23",
2532 "s24", "s25", "s26", "s27",
2533 "s28", "s29", "s30", "s31",
2534 };
2535
2536 static const char *const h_name[] =
2537 {
2538 "h0", "h1", "h2", "h3",
2539 "h4", "h5", "h6", "h7",
2540 "h8", "h9", "h10", "h11",
2541 "h12", "h13", "h14", "h15",
2542 "h16", "h17", "h18", "h19",
2543 "h20", "h21", "h22", "h23",
2544 "h24", "h25", "h26", "h27",
2545 "h28", "h29", "h30", "h31",
2546 };
2547
2548 static const char *const b_name[] =
2549 {
2550 "b0", "b1", "b2", "b3",
2551 "b4", "b5", "b6", "b7",
2552 "b8", "b9", "b10", "b11",
2553 "b12", "b13", "b14", "b15",
2554 "b16", "b17", "b18", "b19",
2555 "b20", "b21", "b22", "b23",
2556 "b24", "b25", "b26", "b27",
2557 "b28", "b29", "b30", "b31",
2558 };
2559
2560 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2561
2562 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2563 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2564
2565 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2566 return d_name[p_regnum - AARCH64_D0_REGNUM];
2567
2568 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2569 return s_name[p_regnum - AARCH64_S0_REGNUM];
2570
2571 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2572 return h_name[p_regnum - AARCH64_H0_REGNUM];
2573
2574 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2575 return b_name[p_regnum - AARCH64_B0_REGNUM];
2576
2577 if (tdep->has_sve ())
2578 {
2579 static const char *const sve_v_name[] =
2580 {
2581 "v0", "v1", "v2", "v3",
2582 "v4", "v5", "v6", "v7",
2583 "v8", "v9", "v10", "v11",
2584 "v12", "v13", "v14", "v15",
2585 "v16", "v17", "v18", "v19",
2586 "v20", "v21", "v22", "v23",
2587 "v24", "v25", "v26", "v27",
2588 "v28", "v29", "v30", "v31",
2589 };
2590
2591 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2592 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2593 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2594 }
2595
2596 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2597 prevents it from being read by methods such as
2598 mi_cmd_trace_frame_collected. */
2599 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2600 return "";
2601
2602 internal_error (__FILE__, __LINE__,
2603 _("aarch64_pseudo_register_name: bad register number %d"),
2604 p_regnum);
2605 }
2606
2607 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2608
2609 static struct type *
2610 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2611 {
2612 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2613
2614 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2615
2616 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2617 return aarch64_vnq_type (gdbarch);
2618
2619 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2620 return aarch64_vnd_type (gdbarch);
2621
2622 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2623 return aarch64_vns_type (gdbarch);
2624
2625 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2626 return aarch64_vnh_type (gdbarch);
2627
2628 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2629 return aarch64_vnb_type (gdbarch);
2630
2631 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2632 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2633 return aarch64_vnv_type (gdbarch);
2634
2635 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2636 return builtin_type (gdbarch)->builtin_uint64;
2637
2638 internal_error (__FILE__, __LINE__,
2639 _("aarch64_pseudo_register_type: bad register number %d"),
2640 p_regnum);
2641 }
2642
2643 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2644
2645 static int
2646 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2647 struct reggroup *group)
2648 {
2649 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2650
2651 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2652
2653 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2654 return group == all_reggroup || group == vector_reggroup;
2655 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2656 return (group == all_reggroup || group == vector_reggroup
2657 || group == float_reggroup);
2658 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2659 return (group == all_reggroup || group == vector_reggroup
2660 || group == float_reggroup);
2661 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2662 return group == all_reggroup || group == vector_reggroup;
2663 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2664 return group == all_reggroup || group == vector_reggroup;
2665 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2666 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2667 return group == all_reggroup || group == vector_reggroup;
2668 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2669 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2670 return 0;
2671
2672 return group == all_reggroup;
2673 }
2674
2675 /* Helper for aarch64_pseudo_read_value. */
2676
2677 static struct value *
2678 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2679 readable_regcache *regcache, int regnum_offset,
2680 int regsize, struct value *result_value)
2681 {
2682 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2683
2684 /* Enough space for a full vector register. */
2685 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2686 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2687
2688 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2689 mark_value_bytes_unavailable (result_value, 0,
2690 TYPE_LENGTH (value_type (result_value)));
2691 else
2692 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2693
2694 return result_value;
2695 }
2696
2697 /* Implement the "pseudo_register_read_value" gdbarch method. */
2698
2699 static struct value *
2700 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2701 int regnum)
2702 {
2703 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2704 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2705
2706 VALUE_LVAL (result_value) = lval_register;
2707 VALUE_REGNUM (result_value) = regnum;
2708
2709 regnum -= gdbarch_num_regs (gdbarch);
2710
2711 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2712 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2713 regnum - AARCH64_Q0_REGNUM,
2714 Q_REGISTER_SIZE, result_value);
2715
2716 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2717 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2718 regnum - AARCH64_D0_REGNUM,
2719 D_REGISTER_SIZE, result_value);
2720
2721 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2722 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2723 regnum - AARCH64_S0_REGNUM,
2724 S_REGISTER_SIZE, result_value);
2725
2726 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2727 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2728 regnum - AARCH64_H0_REGNUM,
2729 H_REGISTER_SIZE, result_value);
2730
2731 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2732 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2733 regnum - AARCH64_B0_REGNUM,
2734 B_REGISTER_SIZE, result_value);
2735
2736 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2737 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2738 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2739 regnum - AARCH64_SVE_V0_REGNUM,
2740 V_REGISTER_SIZE, result_value);
2741
2742   gdb_assert_not_reached ("regnum out of bounds");
2743 }
2744
2745 /* Helper for aarch64_pseudo_write. */
2746
2747 static void
2748 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2749 int regnum_offset, int regsize, const gdb_byte *buf)
2750 {
2751 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2752
2753 /* Enough space for a full vector register. */
2754 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2755 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2756
2757   /* Ensure the register buffer is zero.  We want GDB writes of the
2758      various 'scalar' pseudo registers to behave like architectural
2759      writes: register-width bytes are written and the remainder is set
2760      to zero.  */
2761 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2762
2763 memcpy (reg_buf, buf, regsize);
2764 regcache->raw_write (v_regnum, reg_buf);
2765 }
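
/* For example, writing the S0 pseudo register stores four bytes and
   zeroes the remaining bytes of V0, mirroring the architectural rule
   that a scalar FP write clears the upper bits of the vector
   register.  */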
2766
2767 /* Implement the "pseudo_register_write" gdbarch method. */
2768
2769 static void
2770 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2771 int regnum, const gdb_byte *buf)
2772 {
2773 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2774 regnum -= gdbarch_num_regs (gdbarch);
2775
2776 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2777 return aarch64_pseudo_write_1 (gdbarch, regcache,
2778 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2779 buf);
2780
2781 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2782 return aarch64_pseudo_write_1 (gdbarch, regcache,
2783 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2784 buf);
2785
2786 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2787 return aarch64_pseudo_write_1 (gdbarch, regcache,
2788 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2789 buf);
2790
2791 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2792 return aarch64_pseudo_write_1 (gdbarch, regcache,
2793 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2794 buf);
2795
2796 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2797 return aarch64_pseudo_write_1 (gdbarch, regcache,
2798 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2799 buf);
2800
2801 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2802 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2803 return aarch64_pseudo_write_1 (gdbarch, regcache,
2804 regnum - AARCH64_SVE_V0_REGNUM,
2805 V_REGISTER_SIZE, buf);
2806
2807   gdb_assert_not_reached ("regnum out of bounds");
2808 }
2809
2810 /* Callback function for user_reg_add. */
2811
2812 static struct value *
2813 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2814 {
2815 const int *reg_p = (const int *) baton;
2816
2817 return value_of_register (*reg_p, frame);
2818 }
2819 \f
2820
2821 /* Implement the "software_single_step" gdbarch method, needed to
2822 single step through atomic sequences on AArch64. */
2823
2824 static std::vector<CORE_ADDR>
2825 aarch64_software_single_step (struct regcache *regcache)
2826 {
2827 struct gdbarch *gdbarch = regcache->arch ();
2828 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2829 const int insn_size = 4;
2830 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2831 CORE_ADDR pc = regcache_read_pc (regcache);
2832 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2833 CORE_ADDR loc = pc;
2834 CORE_ADDR closing_insn = 0;
2835 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2836 byte_order_for_code);
2837 int index;
2838 int insn_count;
2839 int bc_insn_count = 0; /* Conditional branch instruction count. */
2840 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2841 aarch64_inst inst;
2842
2843 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2844 return {};
2845
2846 /* Look for a Load Exclusive instruction which begins the sequence. */
2847 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2848 return {};
2849
2850 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2851 {
2852 loc += insn_size;
2853 insn = read_memory_unsigned_integer (loc, insn_size,
2854 byte_order_for_code);
2855
2856 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2857 return {};
2858 /* Check if the instruction is a conditional branch. */
2859 if (inst.opcode->iclass == condbranch)
2860 {
2861 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2862
2863 if (bc_insn_count >= 1)
2864 return {};
2865
2866 /* It is, so we'll try to set a breakpoint at the destination. */
2867 breaks[1] = loc + inst.operands[0].imm.value;
2868
2869 bc_insn_count++;
2870 last_breakpoint++;
2871 }
2872
2873 /* Look for the Store Exclusive which closes the atomic sequence. */
2874 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2875 {
2876 closing_insn = loc;
2877 break;
2878 }
2879 }
2880
2881   /* We didn't find a closing Store Exclusive instruction; fall back.  */
2882 if (!closing_insn)
2883 return {};
2884
2885 /* Insert breakpoint after the end of the atomic sequence. */
2886 breaks[0] = loc + insn_size;
2887
2888 /* Check for duplicated breakpoints, and also check that the second
2889 breakpoint is not within the atomic sequence. */
2890 if (last_breakpoint
2891 && (breaks[1] == breaks[0]
2892 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2893 last_breakpoint = 0;
2894
2895 std::vector<CORE_ADDR> next_pcs;
2896
2897 /* Insert the breakpoint at the end of the sequence, and one at the
2898 destination of the conditional branch, if it exists. */
2899 for (index = 0; index <= last_breakpoint; index++)
2900 next_pcs.push_back (breaks[index]);
2901
2902 return next_pcs;
2903 }
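
/* A typical sequence this recognizes (an illustrative sketch):

     1:  ldaxr  w1, [x0]	; Load Exclusive opens the sequence.
	 add    w1, w1, #1
	 stlxr  w2, w1, [x0]	; Store Exclusive closes it.
	 cbnz   w2, 1b

   Stepping or trapping inside the sequence would clear the exclusive
   monitor so that the Store Exclusive could never succeed; instead
   GDB continues to a breakpoint placed just past the Store Exclusive,
   plus one at the destination of any B.cond found inside the
   sequence.  */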
2904
2905 struct aarch64_displaced_step_copy_insn_closure
2906 : public displaced_step_copy_insn_closure
2907 {
2908   /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2909      is being displaced stepped.  */
2910 bool cond = false;
2911
2912 /* PC adjustment offset after displaced stepping. If 0, then we don't
2913 write the PC back, assuming the PC is already the right address. */
2914 int32_t pc_adjust = 0;
2915 };
2916
2917 /* Data when visiting instructions for displaced stepping. */
2918
2919 struct aarch64_displaced_step_data
2920 {
2921 struct aarch64_insn_data base;
2922
2923   /* The address at which the instruction will be executed.  */
2924 CORE_ADDR new_addr;
2925 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2926 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2927 /* Number of instructions in INSN_BUF. */
2928 unsigned insn_count;
2929 /* Registers when doing displaced stepping. */
2930 struct regcache *regs;
2931
2932 aarch64_displaced_step_copy_insn_closure *dsc;
2933 };
2934
2935 /* Implementation of aarch64_insn_visitor method "b". */
2936
2937 static void
2938 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2939 struct aarch64_insn_data *data)
2940 {
2941 struct aarch64_displaced_step_data *dsd
2942 = (struct aarch64_displaced_step_data *) data;
2943 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2944
2945 if (can_encode_int32 (new_offset, 28))
2946 {
2947 /* Emit B rather than BL, because executing BL on a new address
2948 will get the wrong address into LR. In order to avoid this,
2949 we emit B, and update LR if the instruction is BL. */
2950 emit_b (dsd->insn_buf, 0, new_offset);
2951 dsd->insn_count++;
2952 }
2953 else
2954 {
2955 /* Write NOP. */
2956 emit_nop (dsd->insn_buf);
2957 dsd->insn_count++;
2958 dsd->dsc->pc_adjust = offset;
2959 }
2960
2961 if (is_bl)
2962 {
2963 /* Update LR. */
2964 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2965 data->insn_addr + 4);
2966 }
2967 }
2968
2969 /* Implementation of aarch64_insn_visitor method "b_cond". */
2970
2971 static void
2972 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2973 struct aarch64_insn_data *data)
2974 {
2975 struct aarch64_displaced_step_data *dsd
2976 = (struct aarch64_displaced_step_data *) data;
2977
2978   /* GDB has to fix up the PC after displaced stepping this instruction
2979      differently according to whether the condition is true or false.
2980      Instead of checking COND against the condition flags, we can use
2981      the following instruction sequence, and GDB can tell how to fix up
2982      the PC from the resulting PC value.
2983
2984 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2985 INSN1 ;
2986 TAKEN:
2987 INSN2
2988 */
2989
2990 emit_bcond (dsd->insn_buf, cond, 8);
2991 dsd->dsc->cond = true;
2992 dsd->dsc->pc_adjust = offset;
2993 dsd->insn_count = 1;
2994 }
2995
2996 /* Construct an aarch64_register operand dynamically.  If we know the
2997    register statically, we should make it a global as above instead of
2998    using this helper function.  */
2999
3000 static struct aarch64_register
3001 aarch64_register (unsigned num, int is64)
3002 {
3003 return (struct aarch64_register) { num, is64 };
3004 }
3005
3006 /* Implementation of aarch64_insn_visitor method "cb". */
3007
3008 static void
3009 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3010 const unsigned rn, int is64,
3011 struct aarch64_insn_data *data)
3012 {
3013 struct aarch64_displaced_step_data *dsd
3014 = (struct aarch64_displaced_step_data *) data;
3015
3016 /* The offset is out of range for a compare and branch
3017 instruction. We can use the following instructions instead:
3018
3019 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3020 INSN1 ;
3021 TAKEN:
3022 INSN2
3023 */
3024 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3025 dsd->insn_count = 1;
3026 dsd->dsc->cond = true;
3027 dsd->dsc->pc_adjust = offset;
3028 }
3029
3030 /* Implementation of aarch64_insn_visitor method "tb". */
3031
3032 static void
3033 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3034 const unsigned rt, unsigned bit,
3035 struct aarch64_insn_data *data)
3036 {
3037 struct aarch64_displaced_step_data *dsd
3038 = (struct aarch64_displaced_step_data *) data;
3039
3040 /* The offset is out of range for a test bit and branch
3041    instruction.  We can use the following instructions instead:
3042
3043 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3044 INSN1 ;
3045 TAKEN:
3046 INSN2
3047
3048 */
3049 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3050 dsd->insn_count = 1;
3051 dsd->dsc->cond = true;
3052 dsd->dsc->pc_adjust = offset;
3053 }
3054
3055 /* Implementation of aarch64_insn_visitor method "adr". */
3056
3057 static void
3058 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3059 const int is_adrp, struct aarch64_insn_data *data)
3060 {
3061 struct aarch64_displaced_step_data *dsd
3062 = (struct aarch64_displaced_step_data *) data;
3063 /* We know exactly the address the ADR{P,} instruction will compute.
3064 We can just write it to the destination register. */
3065 CORE_ADDR address = data->insn_addr + offset;
3066
3067 if (is_adrp)
3068 {
3069 /* Clear the lower 12 bits of the offset to get the 4K page. */
3070 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3071 address & ~0xfff);
3072 }
3073 else
3074 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3075 address);
3076
3077 dsd->dsc->pc_adjust = 4;
3078 emit_nop (dsd->insn_buf);
3079 dsd->insn_count = 1;
3080 }
3081
3082 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3083
3084 static void
3085 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3086 const unsigned rt, const int is64,
3087 struct aarch64_insn_data *data)
3088 {
3089 struct aarch64_displaced_step_data *dsd
3090 = (struct aarch64_displaced_step_data *) data;
3091 CORE_ADDR address = data->insn_addr + offset;
3092 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3093
3094 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3095 address);
3096
3097 if (is_sw)
3098 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3099 aarch64_register (rt, 1), zero);
3100 else
3101 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3102 aarch64_register (rt, 1), zero);
3103
3104 dsd->dsc->pc_adjust = 4;
3105 }
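
/* For example, "ldr x2, <literal>" at address A with offset O is
   relocated by preloading X2 with the literal's address A + O and
   copying the instruction as "ldr x2, [x2]", which no longer depends
   on where the scratch pad lives.  */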
3106
3107 /* Implementation of aarch64_insn_visitor method "others". */
3108
3109 static void
3110 aarch64_displaced_step_others (const uint32_t insn,
3111 struct aarch64_insn_data *data)
3112 {
3113 struct aarch64_displaced_step_data *dsd
3114 = (struct aarch64_displaced_step_data *) data;
3115
3116 aarch64_emit_insn (dsd->insn_buf, insn);
3117 dsd->insn_count = 1;
3118
3119 if ((insn & 0xfffffc1f) == 0xd65f0000)
3120 {
3121       /* RET <Xn> (the mask ignores the Rn field); the new PC comes from Xn, so no adjustment is needed.  */
3122 dsd->dsc->pc_adjust = 0;
3123 }
3124 else
3125 dsd->dsc->pc_adjust = 4;
3126 }
3127
3128 static const struct aarch64_insn_visitor visitor =
3129 {
3130 aarch64_displaced_step_b,
3131 aarch64_displaced_step_b_cond,
3132 aarch64_displaced_step_cb,
3133 aarch64_displaced_step_tb,
3134 aarch64_displaced_step_adr,
3135 aarch64_displaced_step_ldr_literal,
3136 aarch64_displaced_step_others,
3137 };
3138
3139 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3140
3141 displaced_step_copy_insn_closure_up
3142 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3143 CORE_ADDR from, CORE_ADDR to,
3144 struct regcache *regs)
3145 {
3146 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3147 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3148 struct aarch64_displaced_step_data dsd;
3149 aarch64_inst inst;
3150
3151 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3152 return NULL;
3153
3154 /* Look for a Load Exclusive instruction which begins the sequence. */
3155 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3156 {
3157       /* We can't displaced-step atomic sequences.  */
3158 return NULL;
3159 }
3160
3161 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3162 (new aarch64_displaced_step_copy_insn_closure);
3163 dsd.base.insn_addr = from;
3164 dsd.new_addr = to;
3165 dsd.regs = regs;
3166 dsd.dsc = dsc.get ();
3167 dsd.insn_count = 0;
3168 aarch64_relocate_instruction (insn, &visitor,
3169 (struct aarch64_insn_data *) &dsd);
3170 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3171
3172 if (dsd.insn_count != 0)
3173 {
3174 int i;
3175
3176       /* The instruction can be relocated to the scratch pad.  Copy the
3177 	 relocated instruction(s) there.  */
3178 for (i = 0; i < dsd.insn_count; i++)
3179 {
3180 displaced_debug_printf ("writing insn %.8x at %s",
3181 dsd.insn_buf[i],
3182 paddress (gdbarch, to + i * 4));
3183
3184 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3185 (ULONGEST) dsd.insn_buf[i]);
3186 }
3187 }
3188 else
3189 {
3190 dsc = NULL;
3191 }
3192
3193   /* This is a workaround for a problem with g++ 4.8.  */
3194 return displaced_step_copy_insn_closure_up (dsc.release ());
3195 }
3196
3197 /* Implement the "displaced_step_fixup" gdbarch method. */
3198
3199 void
3200 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3201 struct displaced_step_copy_insn_closure *dsc_,
3202 CORE_ADDR from, CORE_ADDR to,
3203 struct regcache *regs)
3204 {
3205 aarch64_displaced_step_copy_insn_closure *dsc
3206 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3207
3208 ULONGEST pc;
3209
3210 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3211
3212 displaced_debug_printf ("PC after stepping: %s (was %s).",
3213 paddress (gdbarch, pc), paddress (gdbarch, to));
3214
3215 if (dsc->cond)
3216 {
3217 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3218 dsc->pc_adjust);
3219
3220 if (pc - to == 8)
3221 {
3222 /* Condition is true. */
3223 }
3224 else if (pc - to == 4)
3225 {
3226 /* Condition is false. */
3227 dsc->pc_adjust = 4;
3228 }
3229 else
3230 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3231
3232 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3233 dsc->pc_adjust);
3234 }
3235
3236 displaced_debug_printf ("%s PC by %d",
3237 dsc->pc_adjust ? "adjusting" : "not adjusting",
3238 dsc->pc_adjust);
3239
3240 if (dsc->pc_adjust != 0)
3241 {
3242 /* Make sure the previous instruction was executed (that is, the PC
3243 has changed). If the PC didn't change, then discard the adjustment
3244 offset. Otherwise we may skip an instruction before its execution
3245 took place. */
3246 if ((pc - to) == 0)
3247 {
3248 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3249 dsc->pc_adjust = 0;
3250 }
3251
3252 displaced_debug_printf ("fixup: set PC to %s:%d",
3253 paddress (gdbarch, from), dsc->pc_adjust);
3254
3255 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3256 from + dsc->pc_adjust);
3257 }
3258 }
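
/* A worked example of the conditional case: "b.eq <target>" is
   copied to the scratch address TO as "b.eq TO+8".  If the branch
   was taken, the stopped PC reads TO+8 and PC_ADJUST keeps the
   original branch offset; if not, the PC reads TO+4 and PC_ADJUST is
   forced to 4, so the resumed PC becomes FROM+4, the next
   instruction.  */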
3259
3260 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3261
3262 bool
3263 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3264 {
3265 return true;
3266 }
3267
3268 /* Get the correct target description for the given VQ value.
3269 If VQ is zero then it is assumed SVE is not supported.
3270 (It is not possible to set VQ to zero on an SVE system). */
3271
3272 const target_desc *
3273 aarch64_read_description (uint64_t vq, bool pauth_p)
3274 {
3275 if (vq > AARCH64_MAX_SVE_VQ)
3276 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3277 AARCH64_MAX_SVE_VQ);
3278
3279 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3280
3281 if (tdesc == NULL)
3282 {
3283 tdesc = aarch64_create_target_description (vq, pauth_p);
3284 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3285 }
3286
3287 return tdesc;
3288 }
3289
3290 /* Return the VQ used when creating the target description TDESC. */
3291
3292 static uint64_t
3293 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3294 {
3295 const struct tdesc_feature *feature_sve;
3296
3297 if (!tdesc_has_registers (tdesc))
3298 return 0;
3299
3300 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3301
3302 if (feature_sve == nullptr)
3303 return 0;
3304
3305 uint64_t vl = tdesc_register_bitsize (feature_sve,
3306 aarch64_sve_register_names[0]) / 8;
3307 return sve_vq_from_vl (vl);
3308 }
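
/* For example, a Z register bitsize of 512 gives VL = 64 bytes, and
   sve_vq_from_vl yields VQ = 4, the vector length in units of
   128-bit quadwords.  */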
3309
3310 /* Add all the expected register sets into GDBARCH. */
3311
3312 static void
3313 aarch64_add_reggroups (struct gdbarch *gdbarch)
3314 {
3315 reggroup_add (gdbarch, general_reggroup);
3316 reggroup_add (gdbarch, float_reggroup);
3317 reggroup_add (gdbarch, system_reggroup);
3318 reggroup_add (gdbarch, vector_reggroup);
3319 reggroup_add (gdbarch, all_reggroup);
3320 reggroup_add (gdbarch, save_reggroup);
3321 reggroup_add (gdbarch, restore_reggroup);
3322 }
3323
3324 /* Implement the "cannot_store_register" gdbarch method. */
3325
3326 static int
3327 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3328 {
3329 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3330
3331 if (!tdep->has_pauth ())
3332 return 0;
3333
3334 /* Pointer authentication registers are read-only. */
3335 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3336 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3337 }
3338
3339 /* Initialize the current architecture based on INFO. If possible,
3340 re-use an architecture from ARCHES, which is a list of
3341 architectures already created during this debugging session.
3342
3343 Called e.g. at program startup, when reading a core file, and when
3344 reading a binary file. */
3345
3346 static struct gdbarch *
3347 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3348 {
3349 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3350 const struct tdesc_feature *feature_pauth;
3351 bool valid_p = true;
3352 int i, num_regs = 0, num_pseudo_regs = 0;
3353 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3354
3355 /* Use the vector length passed via the target info. Here -1 is used for no
3356 SVE, and 0 is unset. If unset then use the vector length from the existing
3357 tdesc. */
3358 uint64_t vq = 0;
3359 if (info.id == (int *) -1)
3360 vq = 0;
3361 else if (info.id != 0)
3362 vq = (uint64_t) info.id;
3363 else
3364 vq = aarch64_get_tdesc_vq (info.target_desc);
3365
3366 if (vq > AARCH64_MAX_SVE_VQ)
3367 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3368 pulongest (vq), AARCH64_MAX_SVE_VQ);
3369
3370 /* If there is already a candidate, use it. */
3371 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3372 best_arch != nullptr;
3373 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3374 {
3375 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3376 if (tdep && tdep->vq == vq)
3377 return best_arch->gdbarch;
3378 }
3379
3380   /* Ensure we always have a target description, and that it is for the given VQ
3381 value. */
3382 const struct target_desc *tdesc = info.target_desc;
3383 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3384 tdesc = aarch64_read_description (vq, false);
3385 gdb_assert (tdesc);
3386
3387   feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3388 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3389 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3390 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3391
3392 if (feature_core == nullptr)
3393 return nullptr;
3394
3395 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3396
3397 /* Validate the description provides the mandatory core R registers
3398 and allocate their numbers. */
3399 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3400 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3401 AARCH64_X0_REGNUM + i,
3402 aarch64_r_register_names[i]);
3403
3404 num_regs = AARCH64_X0_REGNUM + i;
3405
3406 /* Add the V registers. */
3407 if (feature_fpu != nullptr)
3408 {
3409 if (feature_sve != nullptr)
3410 error (_("Program contains both fpu and SVE features."));
3411
3412 /* Validate the description provides the mandatory V registers
3413 and allocate their numbers. */
3414 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3415 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3416 AARCH64_V0_REGNUM + i,
3417 aarch64_v_register_names[i]);
3418
3419 num_regs = AARCH64_V0_REGNUM + i;
3420 }
3421
3422 /* Add the SVE registers. */
3423 if (feature_sve != nullptr)
3424 {
3425 /* Validate the description provides the mandatory SVE registers
3426 and allocate their numbers. */
3427 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3428 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3429 AARCH64_SVE_Z0_REGNUM + i,
3430 aarch64_sve_register_names[i]);
3431
3432 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3433 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3434 }
3435
3436 if (feature_fpu != nullptr || feature_sve != nullptr)
3437 {
3438       num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos.  */
3439       num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos.  */
3440       num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos.  */
3441       num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos.  */
3442       num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos.  */
3443 }
3444
3445 /* Add the pauth registers. */
3446 if (feature_pauth != NULL)
3447 {
3448 first_pauth_regnum = num_regs;
3449 pauth_ra_state_offset = num_pseudo_regs;
3450       /* Validate the description provides the mandatory PAUTH registers and
3451 allocate their numbers. */
3452 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3453 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3454 first_pauth_regnum + i,
3455 aarch64_pauth_register_names[i]);
3456
3457 num_regs += i;
3458 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3459 }
3460
3461 if (!valid_p)
3462 return nullptr;
3463
3464 /* AArch64 code is always little-endian. */
3465 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3466
3467 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3468 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3469
3470 /* This should be low enough for everything. */
3471 tdep->lowest_pc = 0x20;
3472 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3473 tdep->jb_elt_size = 8;
3474 tdep->vq = vq;
3475 tdep->pauth_reg_base = first_pauth_regnum;
3476 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3477 : pauth_ra_state_offset + num_regs;
3478
3479 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3480 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3481
3482 /* Advance PC across function entry code. */
3483 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3484
3485 /* The stack grows downward. */
3486 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3487
3488 /* Breakpoint manipulation. */
3489 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3490 aarch64_breakpoint::kind_from_pc);
3491 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3492 aarch64_breakpoint::bp_from_kind);
3493 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3494 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3495
3496 /* Information about registers, etc. */
3497 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3498 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3499 set_gdbarch_num_regs (gdbarch, num_regs);
3500
3501 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3502 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3503 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3504 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3505 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3506 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3507 aarch64_pseudo_register_reggroup_p);
3508 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3509
3510 /* ABI */
3511 set_gdbarch_short_bit (gdbarch, 16);
3512 set_gdbarch_int_bit (gdbarch, 32);
3513 set_gdbarch_float_bit (gdbarch, 32);
3514 set_gdbarch_double_bit (gdbarch, 64);
3515 set_gdbarch_long_double_bit (gdbarch, 128);
3516 set_gdbarch_long_bit (gdbarch, 64);
3517 set_gdbarch_long_long_bit (gdbarch, 64);
3518 set_gdbarch_ptr_bit (gdbarch, 64);
3519 set_gdbarch_char_signed (gdbarch, 0);
3520 set_gdbarch_wchar_signed (gdbarch, 0);
3521 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3522 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3523 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3524 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3525
3526 /* Internal <-> external register number maps. */
3527 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3528
3529 /* Returning results. */
3530 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3531
3532 /* Disassembly. */
3533 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3534
3535 /* Virtual tables. */
3536 set_gdbarch_vbit_in_delta (gdbarch, 1);
3537
3538 /* Register architecture. */
3539 aarch64_add_reggroups (gdbarch);
3540
3541 /* Hook in the ABI-specific overrides, if they have been registered. */
3542 info.target_desc = tdesc;
3543 info.tdesc_data = tdesc_data.get ();
3544 gdbarch_init_osabi (info, gdbarch);
3545
3546 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3547 /* Register DWARF CFA vendor handler. */
3548 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3549 aarch64_execute_dwarf_cfa_vendor_op);
3550
3551 /* Permanent/Program breakpoint handling. */
3552 set_gdbarch_program_breakpoint_here_p (gdbarch,
3553 aarch64_program_breakpoint_here_p);
3554
3555 /* Add some default predicates. */
3556 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3557 dwarf2_append_unwinders (gdbarch);
3558 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3559
3560 frame_base_set_default (gdbarch, &aarch64_normal_base);
3561
3562   /* Now that we have tuned the configuration, set a few final things,
3563 based on what the OS ABI has told us. */
3564
3565 if (tdep->jb_pc >= 0)
3566 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3567
3568 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3569
3570 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3571
3572 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3573
3574 /* Add standard register aliases. */
3575 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3576 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3577 value_of_aarch64_user_reg,
3578 &aarch64_register_aliases[i].regnum);
3579
3580 register_aarch64_ravenscar_ops (gdbarch);
3581
3582 return gdbarch;
3583 }
3584
3585 static void
3586 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3587 {
3588 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3589
3590 if (tdep == NULL)
3591 return;
3592
3593 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3594 paddress (gdbarch, tdep->lowest_pc));
3595 }
3596
3597 #if GDB_SELF_TEST
3598 namespace selftests
3599 {
3600 static void aarch64_process_record_test (void);
3601 }
3602 #endif
3603
3604 void _initialize_aarch64_tdep ();
3605 void
3606 _initialize_aarch64_tdep ()
3607 {
3608 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3609 aarch64_dump_tdep);
3610
3611 /* Debug this file's internals. */
3612 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3613 Set AArch64 debugging."), _("\
3614 Show AArch64 debugging."), _("\
3615 When on, AArch64 specific debugging is enabled."),
3616 NULL,
3617 show_aarch64_debug,
3618 &setdebuglist, &showdebuglist);
3619
3620 #if GDB_SELF_TEST
3621 selftests::register_test ("aarch64-analyze-prologue",
3622 selftests::aarch64_analyze_prologue_test);
3623 selftests::register_test ("aarch64-process-record",
3624 selftests::aarch64_process_record_test);
3625 #endif
3626 }
3627
3628 /* AArch64 process record-replay related structures, defines etc. */
3629
3630 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3631 do \
3632 { \
3633 unsigned int reg_len = LENGTH; \
3634 if (reg_len) \
3635 { \
3636 REGS = XNEWVEC (uint32_t, reg_len); \
3637 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * reg_len); \
3638 } \
3639 } \
3640 while (0)
3641
3642 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3643 do \
3644 { \
3645 unsigned int mem_len = LENGTH; \
3646 if (mem_len) \
3647 { \
3648 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3649 memcpy (MEMS, &RECORD_BUF[0], \
3650 sizeof (struct aarch64_mem_r) * mem_len); \
3651 } \
3652 } \
3653 while (0)
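
/* A minimal usage sketch (following the handler conventions below):
   collect the clobbered register numbers in a local buffer, set the
   count, then publish the list through REG_ALLOC; deallocate_reg_mem
   later frees both allocations.

     uint32_t record_buf[2];
     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);  */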
3654
3655 /* AArch64 record/replay structures and enumerations. */
3656
3657 struct aarch64_mem_r
3658 {
3659 uint64_t len; /* Record length. */
3660 uint64_t addr; /* Memory address. */
3661 };
3662
3663 enum aarch64_record_result
3664 {
3665 AARCH64_RECORD_SUCCESS,
3666 AARCH64_RECORD_UNSUPPORTED,
3667 AARCH64_RECORD_UNKNOWN
3668 };
3669
3670 typedef struct insn_decode_record_t
3671 {
3672 struct gdbarch *gdbarch;
3673 struct regcache *regcache;
3674 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3675 uint32_t aarch64_insn; /* Insn to be recorded. */
3676 uint32_t mem_rec_count; /* Count of memory records. */
3677 uint32_t reg_rec_count; /* Count of register records. */
3678 uint32_t *aarch64_regs; /* Registers to be recorded. */
3679 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3680 } insn_decode_record;
3681
3682 /* Record handler for data processing - register instructions. */
3683
3684 static unsigned int
3685 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3686 {
3687 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3688 uint32_t record_buf[4];
3689
3690 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3691 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3692 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3693
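/* Bit 28 splits this group: when clear the insn is a logical (shifted
   register) or add/subtract form; when set it is one of the remaining
   data-processing subgroups. For example, "adds w0, w1, w2" (encoding
   0x2b020020) clobbers both w0 and the CPSR. */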
3694 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3695 {
3696 uint8_t setflags;
3697
3698 /* Logical (shifted register). */
3699 if (insn_bits24_27 == 0x0a)
3700 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3701 /* Add/subtract. */
3702 else if (insn_bits24_27 == 0x0b)
3703 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3704 else
3705 return AARCH64_RECORD_UNKNOWN;
3706
3707 record_buf[0] = reg_rd;
3708 aarch64_insn_r->reg_rec_count = 1;
3709 if (setflags)
3710 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3711 }
3712 else
3713 {
3714 if (insn_bits24_27 == 0x0b)
3715 {
3716 /* Data-processing (3 source). */
3717 record_buf[0] = reg_rd;
3718 aarch64_insn_r->reg_rec_count = 1;
3719 }
3720 else if (insn_bits24_27 == 0x0a)
3721 {
3722 if (insn_bits21_23 == 0x00)
3723 {
3724 /* Add/subtract (with carry). */
3725 record_buf[0] = reg_rd;
3726 aarch64_insn_r->reg_rec_count = 1;
3727 if (bit (aarch64_insn_r->aarch64_insn, 29))
3728 {
3729 record_buf[1] = AARCH64_CPSR_REGNUM;
3730 aarch64_insn_r->reg_rec_count = 2;
3731 }
3732 }
3733 else if (insn_bits21_23 == 0x02)
3734 {
3735 /* Conditional compare (register) and conditional compare
3736 (immediate) instructions. */
3737 record_buf[0] = AARCH64_CPSR_REGNUM;
3738 aarch64_insn_r->reg_rec_count = 1;
3739 }
3740 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3741 {
3742 /* Conditional select. */
3743 /* Data-processing (2 source). */
3744 /* Data-processing (1 source). */
3745 record_buf[0] = reg_rd;
3746 aarch64_insn_r->reg_rec_count = 1;
3747 }
3748 else
3749 return AARCH64_RECORD_UNKNOWN;
3750 }
3751 }
3752
3753 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3754 record_buf);
3755 return AARCH64_RECORD_SUCCESS;
3756 }
3757
3758 /* Record handler for data processing - immediate instructions. */
3759
3760 static unsigned int
3761 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3762 {
3763 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3764 uint32_t record_buf[4];
3765
3766 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3767 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3768 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3769
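/* For example, "movz w0, #0x1" (encoding 0x52800020) is move wide
   (immediate): bits 24-27 are 0x2 and bit 23 is set, so only w0 needs
   recording. */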
3770 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3771 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3772 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3773 {
3774 record_buf[0] = reg_rd;
3775 aarch64_insn_r->reg_rec_count = 1;
3776 }
3777 else if (insn_bits24_27 == 0x01)
3778 {
3779 /* Add/Subtract (immediate). */
3780 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3781 record_buf[0] = reg_rd;
3782 aarch64_insn_r->reg_rec_count = 1;
3783 if (setflags)
3784 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3785 }
3786 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3787 {
3788 /* Logical (immediate). */
3789 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3790 record_buf[0] = reg_rd;
3791 aarch64_insn_r->reg_rec_count = 1;
3792 if (setflags)
3793 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3794 }
3795 else
3796 return AARCH64_RECORD_UNKNOWN;
3797
3798 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3799 record_buf);
3800 return AARCH64_RECORD_SUCCESS;
3801 }
3802
3803 /* Record handler for branch, exception generation and system instructions. */
3804
3805 static unsigned int
3806 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3807 {
3808 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3809 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3810 uint32_t record_buf[4];
3811
3812 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3813 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3814 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3815
3816 if (insn_bits28_31 == 0x0d)
3817 {
3818 /* Exception generation instructions. */
3819 if (insn_bits24_27 == 0x04)
3820 {
3821 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3822 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3823 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3824 {
3825 ULONGEST svc_number;
3826
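/* SVC: in the AArch64 GNU/Linux ABI the syscall number is passed in
   x8 (register 8); fetch it and let the OS-specific hook record the
   syscall's side effects. */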
3827 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3828 &svc_number);
3829 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3830 svc_number);
3831 }
3832 else
3833 return AARCH64_RECORD_UNSUPPORTED;
3834 }
3835 /* System instructions. */
3836 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3837 {
3838 uint32_t reg_rt, reg_crn;
3839
3840 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3841 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3842
3843 /* Record Rt for SYSL and MRS instructions. */
3844 if (bit (aarch64_insn_r->aarch64_insn, 21))
3845 {
3846 record_buf[0] = reg_rt;
3847 aarch64_insn_r->reg_rec_count = 1;
3848 }
3849 /* Record the CPSR for HINT and MSR (immediate) instructions. */
3850 else if (reg_crn == 0x02 || reg_crn == 0x04)
3851 {
3852 record_buf[0] = AARCH64_CPSR_REGNUM;
3853 aarch64_insn_r->reg_rec_count = 1;
3854 }
3855 }
3856 /* Unconditional branch (register). */
3857 else if ((insn_bits24_27 & 0x0e) == 0x06)
3858 {
3859 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3860 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3861 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3862 }
3863 else
3864 return AARCH64_RECORD_UNKNOWN;
3865 }
3866 /* Unconditional branch (immediate). */
3867 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3868 {
3869 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3870 if (bit (aarch64_insn_r->aarch64_insn, 31))
3871 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3872 }
3873 else
3874 /* Compare & branch (immediate), Test & branch (immediate) and
3875 Conditional branch (immediate). */
3876 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3877
3878 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3879 record_buf);
3880 return AARCH64_RECORD_SUCCESS;
3881 }
3882
3883 /* Record handler for advanced SIMD load and store instructions. */
3884
3885 static unsigned int
3886 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3887 {
3888 CORE_ADDR address;
3889 uint64_t addr_offset = 0;
3890 uint32_t record_buf[24];
3891 uint64_t record_buf_mem[24];
3892 uint32_t reg_rn, reg_rt;
3893 uint32_t reg_index = 0, mem_index = 0;
3894 uint8_t opcode_bits, size_bits;
3895
3896 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3897 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3898 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3899 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3900 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3901
3902 if (record_debug)
3903 debug_printf ("Process record: Advanced SIMD load/store\n");
3904
3905 /* Load/store single structure. */
3906 if (bit (aarch64_insn_r->aarch64_insn, 24))
3907 {
3908 uint8_t sindex, scale, selem, esize, replicate = 0;
3909 scale = opcode_bits >> 2;
3910 selem = ((opcode_bits & 0x02)
3911 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
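/* "scale" selects the element size (8 << scale bits) and "selem" the
   number of registers per structure element (1-4); e.g. an LD4 single
   structure form transfers selem == 4 consecutive registers. */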
3912 switch (scale)
3913 {
3914 case 1:
3915 if (size_bits & 0x01)
3916 return AARCH64_RECORD_UNKNOWN;
3917 break;
3918 case 2:
3919 if ((size_bits >> 1) & 0x01)
3920 return AARCH64_RECORD_UNKNOWN;
3921 if (size_bits & 0x01)
3922 {
3923 if (!((opcode_bits >> 1) & 0x01))
3924 scale = 3;
3925 else
3926 return AARCH64_RECORD_UNKNOWN;
3927 }
3928 break;
3929 case 3:
3930 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3931 {
3932 scale = size_bits;
3933 replicate = 1;
3934 break;
3935 }
3936 else
3937 return AARCH64_RECORD_UNKNOWN;
3938 default:
3939 break;
3940 }
3941 esize = 8 << scale;
3942 if (replicate)
3943 for (sindex = 0; sindex < selem; sindex++)
3944 {
3945 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3946 reg_rt = (reg_rt + 1) % 32;
3947 }
3948 else
3949 {
3950 for (sindex = 0; sindex < selem; sindex++)
3951 {
3952 if (bit (aarch64_insn_r->aarch64_insn, 22))
3953 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3954 else
3955 {
3956 record_buf_mem[mem_index++] = esize / 8;
3957 record_buf_mem[mem_index++] = address + addr_offset;
3958 }
3959 addr_offset = addr_offset + (esize / 8);
3960 reg_rt = (reg_rt + 1) % 32;
3961 }
3962 }
3963 }
3964 /* Load/store multiple structure. */
3965 else
3966 {
3967 uint8_t selem, esize, rpt, elements;
3968 uint8_t eindex, rindex;
3969
3970 esize = 8 << size_bits;
3971 if (bit (aarch64_insn_r->aarch64_insn, 30))
3972 elements = 128 / esize;
3973 else
3974 elements = 64 / esize;
3975
3976 switch (opcode_bits)
3977 {
3978 /* LD/ST4 (4 registers). */
3979 case 0:
3980 rpt = 1;
3981 selem = 4;
3982 break;
3983 /* LD/ST1 (4 registers). */
3984 case 2:
3985 rpt = 4;
3986 selem = 1;
3987 break;
3988 /* LD/ST3 (3 registers). */
3989 case 4:
3990 rpt = 1;
3991 selem = 3;
3992 break;
3993 /* LD/ST1 (3 registers). */
3994 case 6:
3995 rpt = 3;
3996 selem = 1;
3997 break;
3998 /* LD/ST1 (1 register). */
3999 case 7:
4000 rpt = 1;
4001 selem = 1;
4002 break;
4003 /* LD/ST2 (2 registers). */
4004 case 8:
4005 rpt = 1;
4006 selem = 2;
4007 break;
4008 /* LD/ST1 (2 registers). */
4009 case 10:
4010 rpt = 2;
4011 selem = 1;
4012 break;
4013 default:
4014 return AARCH64_RECORD_UNSUPPORTED;
4015 break;
4016 }
4017 for (rindex = 0; rindex < rpt; rindex++)
4018 for (eindex = 0; eindex < elements; eindex++)
4019 {
4020 uint8_t reg_tt, sindex;
4021 reg_tt = (reg_rt + rindex) % 32;
4022 for (sindex = 0; sindex < selem; sindex++)
4023 {
4024 if (bit (aarch64_insn_r->aarch64_insn, 22))
4025 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4026 else
4027 {
4028 record_buf_mem[mem_index++] = esize / 8;
4029 record_buf_mem[mem_index++] = address + addr_offset;
4030 }
4031 addr_offset = addr_offset + (esize / 8);
4032 reg_tt = (reg_tt + 1) % 32;
4033 }
4034 }
4035 }
4036
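/* Post-indexed forms (bit 23 set) also write back the base
   register. */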
4037 if (bit (aarch64_insn_r->aarch64_insn, 23))
4038 record_buf[reg_index++] = reg_rn;
4039
4040 aarch64_insn_r->reg_rec_count = reg_index;
4041 aarch64_insn_r->mem_rec_count = mem_index / 2;
4042 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4043 record_buf_mem);
4044 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4045 record_buf);
4046 return AARCH64_RECORD_SUCCESS;
4047 }
4048
4049 /* Record handler for load and store instructions. */
4050
4051 static unsigned int
4052 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4053 {
4054 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4055 uint8_t insn_bit23, insn_bit21;
4056 uint8_t opc, size_bits, ld_flag, vector_flag;
4057 uint32_t reg_rn, reg_rt, reg_rt2;
4058 uint64_t datasize, offset;
4059 uint32_t record_buf[8];
4060 uint64_t record_buf_mem[8];
4061 CORE_ADDR address;
4062
4063 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4064 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4065 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4066 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4067 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4068 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4069 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4070 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4071 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4072 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4073 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4074
4075 /* Load/store exclusive. */
4076 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4077 {
4078 if (record_debug)
4079 debug_printf ("Process record: load/store exclusive\n");
4080
4081 if (ld_flag)
4082 {
4083 record_buf[0] = reg_rt;
4084 aarch64_insn_r->reg_rec_count = 1;
4085 if (insn_bit21)
4086 {
4087 record_buf[1] = reg_rt2;
4088 aarch64_insn_r->reg_rec_count = 2;
4089 }
4090 }
4091 else
4092 {
4093 if (insn_bit21)
4094 datasize = (8 << size_bits) * 2;
4095 else
4096 datasize = (8 << size_bits);
4097 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4098 &address);
4099 record_buf_mem[0] = datasize / 8;
4100 record_buf_mem[1] = address;
4101 aarch64_insn_r->mem_rec_count = 1;
4102 if (!insn_bit23)
4103 {
4104 /* A store-exclusive also writes its status result to register Rs; record it. */
4105 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4106 aarch64_insn_r->reg_rec_count = 1;
4107 }
4108 }
4109 }
4110 /* Load register (literal) instructions. */
4111 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4112 {
4113 if (record_debug)
4114 debug_printf ("Process record: load register (literal)\n");
4115 if (vector_flag)
4116 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4117 else
4118 record_buf[0] = reg_rt;
4119 aarch64_insn_r->reg_rec_count = 1;
4120 }
4121 /* Load/store pair instructions (all variants). */
4122 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4123 {
4124 if (record_debug)
4125 debug_printf ("Process record: load/store pair\n");
4126
4127 if (ld_flag)
4128 {
4129 if (vector_flag)
4130 {
4131 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4132 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4133 }
4134 else
4135 {
4136 record_buf[0] = reg_rt;
4137 record_buf[1] = reg_rt2;
4138 }
4139 aarch64_insn_r->reg_rec_count = 2;
4140 }
4141 else
4142 {
4143 uint16_t imm7_off;
4144 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
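/* imm7 is a signed, scaled offset: bit 6 holds the sign, and the
   magnitude of a negative offset is recovered by two's complement
   before scaling by the access size below. */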
4145 if (!vector_flag)
4146 size_bits = size_bits >> 1;
4147 datasize = 8 << (2 + size_bits);
4148 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4149 offset = offset << (2 + size_bits);
4150 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4151 &address);
4152 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4153 {
4154 if (imm7_off & 0x40)
4155 address = address - offset;
4156 else
4157 address = address + offset;
4158 }
4159
4160 record_buf_mem[0] = datasize / 8;
4161 record_buf_mem[1] = address;
4162 record_buf_mem[2] = datasize / 8;
4163 record_buf_mem[3] = address + (datasize / 8);
4164 aarch64_insn_r->mem_rec_count = 2;
4165 }
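/* Pre- and post-indexed forms (bit 23 set) also write back the updated
   base register. */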
4166 if (bit (aarch64_insn_r->aarch64_insn, 23))
4167 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4168 }
4169 /* Load/store register (unsigned immediate) instructions. */
4170 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4171 {
4172 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4173 if (!(opc >> 1))
4174 {
4175 if (opc & 0x01)
4176 ld_flag = 0x01;
4177 else
4178 ld_flag = 0x0;
4179 }
4180 else
4181 {
4182 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4183 {
4184 /* PRFM (immediate) */
4185 return AARCH64_RECORD_SUCCESS;
4186 }
4187 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4188 {
4189 /* LDRSW (immediate) */
4190 ld_flag = 0x1;
4191 }
4192 else
4193 {
4194 if (opc & 0x01)
4195 ld_flag = 0x01;
4196 else
4197 ld_flag = 0x0;
4198 }
4199 }
4200
4201 if (record_debug)
4202 {
4203 debug_printf ("Process record: load/store (unsigned immediate):"
4204 " size %x V %d opc %x\n", size_bits, vector_flag,
4205 opc);
4206 }
4207
4208 if (!ld_flag)
4209 {
4210 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4211 datasize = 8 << size_bits;
4212 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4213 &address);
4214 offset = offset << size_bits;
4215 address = address + offset;
4216
4217 record_buf_mem[0] = datasize >> 3;
4218 record_buf_mem[1] = address;
4219 aarch64_insn_r->mem_rec_count = 1;
4220 }
4221 else
4222 {
4223 if (vector_flag)
4224 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4225 else
4226 record_buf[0] = reg_rt;
4227 aarch64_insn_r->reg_rec_count = 1;
4228 }
4229 }
4230 /* Load/store register (register offset) instructions. */
4231 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4232 && insn_bits10_11 == 0x02 && insn_bit21)
4233 {
4234 if (record_debug)
4235 debug_printf ("Process record: load/store (register offset)\n");
4236 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4237 if (!(opc >> 1))
4238 if (opc & 0x01)
4239 ld_flag = 0x01;
4240 else
4241 ld_flag = 0x0;
4242 else
4243 if (size_bits != 0x03)
4244 ld_flag = 0x01;
4245 else
4246 return AARCH64_RECORD_UNKNOWN;
4247
4248 if (!ld_flag)
4249 {
4250 ULONGEST reg_rm_val;
4251
4252 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4253 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4254 if (bit (aarch64_insn_r->aarch64_insn, 12))
4255 offset = reg_rm_val << size_bits;
4256 else
4257 offset = reg_rm_val;
4258 datasize = 8 << size_bits;
4259 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4260 &address);
4261 address = address + offset;
4262 record_buf_mem[0] = datasize >> 3;
4263 record_buf_mem[1] = address;
4264 aarch64_insn_r->mem_rec_count = 1;
4265 }
4266 else
4267 {
4268 if (vector_flag)
4269 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4270 else
4271 record_buf[0] = reg_rt;
4272 aarch64_insn_r->reg_rec_count = 1;
4273 }
4274 }
4275 /* Load/store register (immediate and unprivileged) instructions. */
4276 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4277 && !insn_bit21)
4278 {
4279 if (record_debug)
4280 {
4281 debug_printf ("Process record: load/store "
4282 "(immediate and unprivileged)\n");
4283 }
4284 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4285 if (!(opc >> 1))
4286 if (opc & 0x01)
4287 ld_flag = 0x01;
4288 else
4289 ld_flag = 0x0;
4290 else
4291 if (size_bits != 0x03)
4292 ld_flag = 0x01;
4293 else
4294 return AARCH64_RECORD_UNKNOWN;
4295
4296 if (!ld_flag)
4297 {
4298 uint16_t imm9_off;
4299 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
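/* imm9 is signed: bit 8 holds the sign, and the magnitude of a
   negative offset is recovered by two's complement. */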
4300 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4301 datasize = 8 << size_bits;
4302 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4303 &address);
4304 if (insn_bits10_11 != 0x01)
4305 {
4306 if (imm9_off & 0x0100)
4307 address = address - offset;
4308 else
4309 address = address + offset;
4310 }
4311 record_buf_mem[0] = datasize >> 3;
4312 record_buf_mem[1] = address;
4313 aarch64_insn_r->mem_rec_count = 1;
4314 }
4315 else
4316 {
4317 if (vector_flag)
4318 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4319 else
4320 record_buf[0] = reg_rt;
4321 aarch64_insn_r->reg_rec_count = 1;
4322 }
4323 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4324 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4325 }
4326 /* Advanced SIMD load/store instructions. */
4327 else
4328 return aarch64_record_asimd_load_store (aarch64_insn_r);
4329
4330 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4331 record_buf_mem);
4332 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4333 record_buf);
4334 return AARCH64_RECORD_SUCCESS;
4335 }
4336
4337 /* Record handler for data processing SIMD and floating point instructions. */
4338
4339 static unsigned int
4340 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4341 {
4342 uint8_t insn_bit21, opcode, rmode, reg_rd;
4343 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4344 uint8_t insn_bits11_14;
4345 uint32_t record_buf[2];
4346
4347 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4348 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4349 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4350 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4351 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4352 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4353 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4354 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4355 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4356
4357 if (record_debug)
4358 debug_printf ("Process record: data processing SIMD/FP: ");
4359
4360 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4361 {
4362 /* Floating point - fixed point conversion instructions. */
4363 if (!insn_bit21)
4364 {
4365 if (record_debug)
4366 debug_printf ("FP - fixed point conversion");
4367
4368 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4369 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4370 else
4371 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4372 }
4373 /* Floating point - conditional compare instructions. */
4374 else if (insn_bits10_11 == 0x01)
4375 {
4376 if (record_debug)
4377 debug_printf ("FP - conditional compare");
4378
4379 record_buf[0] = AARCH64_CPSR_REGNUM;
4380 }
4381 /* Floating point - data processing (2-source) and
4382 conditional select instructions. */
4383 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4384 {
4385 if (record_debug)
4386 debug_printf ("FP - DP (2-source)");
4387
4388 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4389 }
4390 else if (insn_bits10_11 == 0x00)
4391 {
4392 /* Floating point - immediate instructions. */
4393 if ((insn_bits12_15 & 0x01) == 0x01
4394 || (insn_bits12_15 & 0x07) == 0x04)
4395 {
4396 if (record_debug)
4397 debug_printf ("FP - immediate");
4398 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4399 }
4400 /* Floating point - compare instructions. */
4401 else if ((insn_bits12_15 & 0x03) == 0x02)
4402 {
4403 if (record_debug)
4404 debug_printf ("FP - immediate");
4405 record_buf[0] = AARCH64_CPSR_REGNUM;
4406 }
4407 /* Floating point - integer conversions instructions. */
4408 else if (insn_bits12_15 == 0x00)
4409 {
4410 /* Convert float to integer instruction. */
4411 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4412 {
4413 if (record_debug)
4414 debug_printf ("float to int conversion");
4415
4416 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4417 }
4418 /* Convert integer to float instruction. */
4419 else if ((opcode >> 1) == 0x01 && !rmode)
4420 {
4421 if (record_debug)
4422 debug_printf ("int to float conversion");
4423
4424 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4425 }
4426 /* Move float to integer instruction. */
4427 else if ((opcode >> 1) == 0x03)
4428 {
4429 if (record_debug)
4430 debug_printf ("move float to int");
4431
4432 if (!(opcode & 0x01))
4433 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4434 else
4435 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4436 }
4437 else
4438 return AARCH64_RECORD_UNKNOWN;
4439 }
4440 else
4441 return AARCH64_RECORD_UNKNOWN;
4442 }
4443 else
4444 return AARCH64_RECORD_UNKNOWN;
4445 }
4446 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4447 {
4448 if (record_debug)
4449 debug_printf ("SIMD copy");
4450
4451 /* Advanced SIMD copy instructions. */
4452 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4453 && !bit (aarch64_insn_r->aarch64_insn, 15)
4454 && bit (aarch64_insn_r->aarch64_insn, 10))
4455 {
4456 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4457 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4458 else
4459 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4460 }
4461 else
4462 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4463 }
4464 /* All remaining floating point or advanced SIMD instructions. */
4465 else
4466 {
4467 if (record_debug)
4468 debug_printf ("all remain");
4469
4470 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4471 }
4472
4473 if (record_debug)
4474 debug_printf ("\n");
4475
4476 /* Record the V/X register. */
4477 aarch64_insn_r->reg_rec_count++;
4478
4479 /* Some of these instructions may set bits in the FPSR, so record it
4480 too. */
4481 record_buf[1] = AARCH64_FPSR_REGNUM;
4482 aarch64_insn_r->reg_rec_count++;
4483
4484 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4485 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4486 record_buf);
4487 return AARCH64_RECORD_SUCCESS;
4488 }
4489
4490 /* Decode the type of the instruction and dispatch to the matching record handler. */
4491
4492 static unsigned int
4493 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4494 {
4495 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4496
4497 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4498 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4499 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4500 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
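/* Top-level opcode map, keyed on insn bits 28-25:
     100x - data processing (immediate)
     101x - branch, exception generation and system
     x1x0 - loads and stores
     x101 - data processing (register)
     x111 - data processing (SIMD and floating point). */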
4501
4502 /* Data processing - immediate instructions. */
4503 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4504 return aarch64_record_data_proc_imm (aarch64_insn_r);
4505
4506 /* Branch, exception generation and system instructions. */
4507 if (ins_bit26 && !ins_bit27 && ins_bit28)
4508 return aarch64_record_branch_except_sys (aarch64_insn_r);
4509
4510 /* Load and store instructions. */
4511 if (!ins_bit25 && ins_bit27)
4512 return aarch64_record_load_store (aarch64_insn_r);
4513
4514 /* Data processing - register instructions. */
4515 if (ins_bit25 && !ins_bit26 && ins_bit27)
4516 return aarch64_record_data_proc_reg (aarch64_insn_r);
4517
4518 /* Data processing - SIMD and floating point instructions. */
4519 if (ins_bit25 && ins_bit26 && ins_bit27)
4520 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4521
4522 return AARCH64_RECORD_UNSUPPORTED;
4523 }
4524
4525 /* Free the register and memory record buffers of a record. */
4526
4527 static void
4528 deallocate_reg_mem (insn_decode_record *record)
4529 {
4530 xfree (record->aarch64_regs);
4531 xfree (record->aarch64_mems);
4532 }
4533
4534 #if GDB_SELF_TEST
4535 namespace selftests {
4536
4537 static void
4538 aarch64_process_record_test (void)
4539 {
4540 struct gdbarch_info info;
4541 uint32_t ret;
4542
4543 gdbarch_info_init (&info);
4544 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4545
4546 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4547 SELF_CHECK (gdbarch != NULL);
4548
4549 insn_decode_record aarch64_record;
4550
4551 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4552 aarch64_record.regcache = NULL;
4553 aarch64_record.this_addr = 0;
4554 aarch64_record.gdbarch = gdbarch;
4555
4556 /* 20 00 80 f9 prfm pldl1keep, [x1] */
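/* PRFM is a prefetch hint: it alters no register or memory state, so a
   successful record must report zero register and memory records. */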
4557 aarch64_record.aarch64_insn = 0xf9800020;
4558 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4559 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4560 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4561 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4562
4563 deallocate_reg_mem (&aarch64_record);
4564 }
4565
4566 } // namespace selftests
4567 #endif /* GDB_SELF_TEST */
4568
4569 /* Parse the current instruction, and record the values of the registers
4570 and memory that it will change in record_arch_list. Return -1 if
4571 something goes wrong. */
4572
4573 int
4574 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4575 CORE_ADDR insn_addr)
4576 {
4577 uint32_t rec_no = 0;
4578 uint8_t insn_size = 4;
4579 uint32_t ret = 0;
4580 gdb_byte buf[insn_size];
4581 insn_decode_record aarch64_record;
4582
4583 memset (&buf[0], 0, insn_size);
4584 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4585 target_read_memory (insn_addr, &buf[0], insn_size);
4586 aarch64_record.aarch64_insn
4587 = (uint32_t) extract_unsigned_integer (&buf[0],
4588 insn_size,
4589 gdbarch_byte_order (gdbarch));
4590 aarch64_record.regcache = regcache;
4591 aarch64_record.this_addr = insn_addr;
4592 aarch64_record.gdbarch = gdbarch;
4593
4594 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4595 if (ret == AARCH64_RECORD_UNSUPPORTED)
4596 {
4597 printf_unfiltered (_("Process record does not support instruction "
4598 "0x%0x at address %s.\n"),
4599 aarch64_record.aarch64_insn,
4600 paddress (gdbarch, insn_addr));
4601 ret = -1;
4602 }
4603
4604 if (ret == 0)
4605 {
4606 /* Record registers. */
4607 record_full_arch_list_add_reg (aarch64_record.regcache,
4608 AARCH64_PC_REGNUM);
4609 /* Always record register CPSR. */
4610 record_full_arch_list_add_reg (aarch64_record.regcache,
4611 AARCH64_CPSR_REGNUM);
4612 if (aarch64_record.aarch64_regs)
4613 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4614 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4615 aarch64_record.aarch64_regs[rec_no]))
4616 ret = -1;
4617
4618 /* Record memories. */
4619 if (aarch64_record.aarch64_mems)
4620 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4621 if (record_full_arch_list_add_mem
4622 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4623 aarch64_record.aarch64_mems[rec_no].len))
4624 ret = -1;
4625
4626 if (record_full_arch_list_add_end ())
4627 ret = -1;
4628 }
4629
4630 deallocate_reg_mem (&aarch64_record);
4631 return ret;
4632 }