/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
			 struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
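  /* Model the stack as an area addressed relative to the initial SP, so
     stores of saved registers can be located again below.  */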
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

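	  /* Likewise for the slot holding the second register of the
	     pair.  */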
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    trad_frame_set_value (cache->saved_regs,
				  tdep->pauth_ra_state_regnum,
				  ra_state_val);
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
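      /* The D registers are exposed as pseudo registers, numbered after
	 all of the raw registers, hence the gdbarch_num_regs offset
	 below.  */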
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
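    /* Addresses are treated as indices into the cooked instruction
       array, one 32-bit instruction per slot.  */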
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ());
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum
				     + AARCH64_D0_REGNUM].is_realreg ());
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ());
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum
				       + AARCH64_D0_REGNUM].addr ()
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum
				       + AARCH64_D0_REGNUM].is_realreg ());
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
      0x52800020, /* mov w0, #0x1 */
      0x910003fd, /* mov x29, sp */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0x290207e0, /* stp w0, w1, [sp, #16] */
      0xa9018fe2, /* stp x2, x3, [sp, #24] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0xb9002be4, /* str w4, [sp, #40] */
      0xf9001be5, /* str x5, [sp, #48] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0xb80343e6, /* stur w6, [sp, #52] */
      0xf80383e7, /* stur x7, [sp, #56] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
      0x52800020, /* mov w0, #0x1 */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
	0x910003fd, /* mov x29, sp */
	0xf801c3f3, /* str x19, [sp, #28] */
	0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ());
	}

      if (tdep->has_pauth ())
	{
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr ()
		      == 1);
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

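      /* Never scan past the point where execution currently stands.  */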
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

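  /* Reconstruct the caller's stack pointer as the unwound frame base
     plus the frame size.  */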
  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && trad_frame_value_p (cache->saved_regs,
				 tdep->pauth_ra_state_regnum))
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder try to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

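/* Single-byte DWARF expressions used below to record the state of the
   return-address signing (RA_STATE) pseudo register.  */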
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer
		     (target_mem, insn_len,
		      gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
	 choices of such instructions with different immediate values.
	 Different OSes may use different variations, but they have the
	 same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same as for
	 scalar types), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
	return 16;
      else
	return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
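      /* A fundamental floating-point type may be at most 16 bytes
	 (quad precision).  */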
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero-length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   A candidate as per AAPCS64 5.4.2.C is one of:
   - A float.
   - A short-vector.
   - An HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite type
     where all the members are floats and which has at most 4 members.
   - An HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite type
     where all the members are short vectors and which has at most 4 members.
   - A complex type (7.1.1).

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
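  /* No V registers remain; clamp NSRN and report failure so the caller
     falls back to passing on the stack.  */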
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
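  /* Number of X registers needed, rounding the length up to a whole
     number of registers.  */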
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes value is
   an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in V registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available,
		 therefore this will never need to fall back to the
		 stack.  */
1851 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1852 arg))
1853 gdb_assert_not_reached ("Failed to push args");
1854 }
1855 else
1856 {
1857 info.nsrn = 8;
1858 pass_on_stack (&info, arg_type, arg);
1859 }
1860 continue;
1861 }
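/* Example: struct hfa { float x, y, z; } is such a candidate, with
   ELEMENTS == 3 and FUNDAMENTAL_TYPE float; it is passed in S0-S2
   when at least three of the eight NSRN slots (V0-V7) remain free. */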
1862
1863 switch (arg_type->code ())
1864 {
1865 case TYPE_CODE_INT:
1866 case TYPE_CODE_BOOL:
1867 case TYPE_CODE_CHAR:
1868 case TYPE_CODE_RANGE:
1869 case TYPE_CODE_ENUM:
1870 if (len < 4)
1871 {
1872 /* Promote to 32 bit integer. */
1873 if (arg_type->is_unsigned ())
1874 arg_type = builtin_type (gdbarch)->builtin_uint32;
1875 else
1876 arg_type = builtin_type (gdbarch)->builtin_int32;
1877 arg = value_cast (arg_type, arg);
1878 }
1879 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1880 break;
1881
1882 case TYPE_CODE_STRUCT:
1883 case TYPE_CODE_ARRAY:
1884 case TYPE_CODE_UNION:
1885 if (len > 16)
1886 {
1887 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1888 invisible reference. */
1889
1890 /* Allocate aligned storage. */
1891 sp = align_down (sp - len, 16);
1892
1893 /* Write the real data into the stack. */
1894 write_memory (sp, value_contents (arg), len);
1895
1896 /* Construct the indirection. */
1897 arg_type = lookup_pointer_type (arg_type);
1898 arg = value_from_pointer (arg_type, sp);
1899 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1900 }
1901 else
1902 /* PCS C.15 / C.18: pass in X registers, overflowing to the stack. */
1903 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1904 break;
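/* E.g. a 24-byte struct passed by value is copied to a 16-byte
   aligned stack slot above, and only the pointer to that copy is
   passed in the next free X register (or on the stack). */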
1905
1906 default:
1907 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1908 break;
1909 }
1910 }
1911
1912 /* Make sure the stack retains 16-byte alignment. */
1913 if (info.nsaa & 15)
1914 sp -= 16 - (info.nsaa & 15);
1915
1916 while (!info.si.empty ())
1917 {
1918 const stack_item_t &si = info.si.back ();
1919
1920 sp -= si.len;
1921 if (si.data != NULL)
1922 write_memory (sp, si.data, si.len);
1923 info.si.pop_back ();
1924 }
1925
1926 /* Finally, update the SP register. */
1927 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1928
1929 return sp;
1930 }
1931
1932 /* Implement the "frame_align" gdbarch method. */
1933
1934 static CORE_ADDR
1935 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1936 {
1937 /* Align the stack to sixteen bytes. */
1938 return sp & ~(CORE_ADDR) 15;
1939 }
1940
1941 /* Return the type for an AdvSIMD Q register. */
1942
1943 static struct type *
1944 aarch64_vnq_type (struct gdbarch *gdbarch)
1945 {
1946 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1947
1948 if (tdep->vnq_type == NULL)
1949 {
1950 struct type *t;
1951 struct type *elem;
1952
1953 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1954 TYPE_CODE_UNION);
1955
1956 elem = builtin_type (gdbarch)->builtin_uint128;
1957 append_composite_type_field (t, "u", elem);
1958
1959 elem = builtin_type (gdbarch)->builtin_int128;
1960 append_composite_type_field (t, "s", elem);
1961
1962 tdep->vnq_type = t;
1963 }
1964
1965 return tdep->vnq_type;
1966 }
1967
1968 /* Return the type for an AdvSIMD D register. */
1969
1970 static struct type *
1971 aarch64_vnd_type (struct gdbarch *gdbarch)
1972 {
1973 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1974
1975 if (tdep->vnd_type == NULL)
1976 {
1977 struct type *t;
1978 struct type *elem;
1979
1980 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1981 TYPE_CODE_UNION);
1982
1983 elem = builtin_type (gdbarch)->builtin_double;
1984 append_composite_type_field (t, "f", elem);
1985
1986 elem = builtin_type (gdbarch)->builtin_uint64;
1987 append_composite_type_field (t, "u", elem);
1988
1989 elem = builtin_type (gdbarch)->builtin_int64;
1990 append_composite_type_field (t, "s", elem);
1991
1992 tdep->vnd_type = t;
1993 }
1994
1995 return tdep->vnd_type;
1996 }
1997
1998 /* Return the type for an AdvSIMD S register. */
1999
2000 static struct type *
2001 aarch64_vns_type (struct gdbarch *gdbarch)
2002 {
2003 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2004
2005 if (tdep->vns_type == NULL)
2006 {
2007 struct type *t;
2008 struct type *elem;
2009
2010 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2011 TYPE_CODE_UNION);
2012
2013 elem = builtin_type (gdbarch)->builtin_float;
2014 append_composite_type_field (t, "f", elem);
2015
2016 elem = builtin_type (gdbarch)->builtin_uint32;
2017 append_composite_type_field (t, "u", elem);
2018
2019 elem = builtin_type (gdbarch)->builtin_int32;
2020 append_composite_type_field (t, "s", elem);
2021
2022 tdep->vns_type = t;
2023 }
2024
2025 return tdep->vns_type;
2026 }
2027
2028 /* Return the type for an AdvSIMD H register. */
2029
2030 static struct type *
2031 aarch64_vnh_type (struct gdbarch *gdbarch)
2032 {
2033 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2034
2035 if (tdep->vnh_type == NULL)
2036 {
2037 struct type *t;
2038 struct type *elem;
2039
2040 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2041 TYPE_CODE_UNION);
2042
2043 elem = builtin_type (gdbarch)->builtin_bfloat16;
2044 append_composite_type_field (t, "bf", elem);
2045
2046 elem = builtin_type (gdbarch)->builtin_half;
2047 append_composite_type_field (t, "f", elem);
2048
2049 elem = builtin_type (gdbarch)->builtin_uint16;
2050 append_composite_type_field (t, "u", elem);
2051
2052 elem = builtin_type (gdbarch)->builtin_int16;
2053 append_composite_type_field (t, "s", elem);
2054
2055 tdep->vnh_type = t;
2056 }
2057
2058 return tdep->vnh_type;
2059 }
2060
2061 /* Return the type for an AdvSIMD B register. */
2062
2063 static struct type *
2064 aarch64_vnb_type (struct gdbarch *gdbarch)
2065 {
2066 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2067
2068 if (tdep->vnb_type == NULL)
2069 {
2070 struct type *t;
2071 struct type *elem;
2072
2073 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2074 TYPE_CODE_UNION);
2075
2076 elem = builtin_type (gdbarch)->builtin_uint8;
2077 append_composite_type_field (t, "u", elem);
2078
2079 elem = builtin_type (gdbarch)->builtin_int8;
2080 append_composite_type_field (t, "s", elem);
2081
2082 tdep->vnb_type = t;
2083 }
2084
2085 return tdep->vnb_type;
2086 }
2087
2088 /* Return the type for an AdvSIMD V register. */
2089
2090 static struct type *
2091 aarch64_vnv_type (struct gdbarch *gdbarch)
2092 {
2093 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2094
2095 if (tdep->vnv_type == NULL)
2096 {
2097 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2098 slice from the non-pseudo vector registers. However, NEON V registers
2099 are always vector registers, and need to be constructed as such. */
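/* For example, with this type a user can view one V register several
   ways: "print $v0.d.f" shows two doubles, "print $v0.s.u" four
   32-bit unsigned lanes, and "print $v0.h.bf" eight bfloat16 lanes. */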
2100 const struct builtin_type *bt = builtin_type (gdbarch);
2101
2102 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2103 TYPE_CODE_UNION);
2104
2105 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2106 TYPE_CODE_UNION);
2107 append_composite_type_field (sub, "f",
2108 init_vector_type (bt->builtin_double, 2));
2109 append_composite_type_field (sub, "u",
2110 init_vector_type (bt->builtin_uint64, 2));
2111 append_composite_type_field (sub, "s",
2112 init_vector_type (bt->builtin_int64, 2));
2113 append_composite_type_field (t, "d", sub);
2114
2115 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2116 TYPE_CODE_UNION);
2117 append_composite_type_field (sub, "f",
2118 init_vector_type (bt->builtin_float, 4));
2119 append_composite_type_field (sub, "u",
2120 init_vector_type (bt->builtin_uint32, 4));
2121 append_composite_type_field (sub, "s",
2122 init_vector_type (bt->builtin_int32, 4));
2123 append_composite_type_field (t, "s", sub);
2124
2125 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2126 TYPE_CODE_UNION);
2127 append_composite_type_field (sub, "bf",
2128 init_vector_type (bt->builtin_bfloat16, 8));
2129 append_composite_type_field (sub, "f",
2130 init_vector_type (bt->builtin_half, 8));
2131 append_composite_type_field (sub, "u",
2132 init_vector_type (bt->builtin_uint16, 8));
2133 append_composite_type_field (sub, "s",
2134 init_vector_type (bt->builtin_int16, 8));
2135 append_composite_type_field (t, "h", sub);
2136
2137 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2138 TYPE_CODE_UNION);
2139 append_composite_type_field (sub, "u",
2140 init_vector_type (bt->builtin_uint8, 16));
2141 append_composite_type_field (sub, "s",
2142 init_vector_type (bt->builtin_int8, 16));
2143 append_composite_type_field (t, "b", sub);
2144
2145 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2146 TYPE_CODE_UNION);
2147 append_composite_type_field (sub, "u",
2148 init_vector_type (bt->builtin_uint128, 1));
2149 append_composite_type_field (sub, "s",
2150 init_vector_type (bt->builtin_int128, 1));
2151 append_composite_type_field (t, "q", sub);
2152
2153 tdep->vnv_type = t;
2154 }
2155
2156 return tdep->vnv_type;
2157 }
2158
2159 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2160
2161 static int
2162 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2163 {
2164 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2165
2166 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2167 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2168
2169 if (reg == AARCH64_DWARF_SP)
2170 return AARCH64_SP_REGNUM;
2171
2172 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2173 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2174
2175 if (reg == AARCH64_DWARF_SVE_VG)
2176 return AARCH64_SVE_VG_REGNUM;
2177
2178 if (reg == AARCH64_DWARF_SVE_FFR)
2179 return AARCH64_SVE_FFR_REGNUM;
2180
2181 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2182 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2183
2184 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2185 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2186
2187 if (tdep->has_pauth ())
2188 {
2189 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2190 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2191
2192 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2193 return tdep->pauth_ra_state_regnum;
2194 }
2195
2196 return -1;
2197 }
2198
2199 /* Implement the "print_insn" gdbarch method. */
2200
2201 static int
2202 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2203 {
2204 info->symbols = NULL;
2205 return default_print_insn (memaddr, info);
2206 }
2207
2208 /* AArch64 BRK software debug mode instruction.
2209 Note that AArch64 code is always little-endian.
2210 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2211 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2212
2213 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2214
2215 /* Extract from an array REGS containing the (raw) register state a
2216 function return value of type TYPE, and copy that, in virtual
2217 format, into VALBUF. */
2218
2219 static void
2220 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2221 gdb_byte *valbuf)
2222 {
2223 struct gdbarch *gdbarch = regs->arch ();
2224 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2225 int elements;
2226 struct type *fundamental_type;
2227
2228 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2229 &fundamental_type))
2230 {
2231 int len = TYPE_LENGTH (fundamental_type);
2232
2233 for (int i = 0; i < elements; i++)
2234 {
2235 int regno = AARCH64_V0_REGNUM + i;
2236 /* Enough space for a full vector register. */
2237 gdb_byte buf[register_size (gdbarch, regno)];
2238 gdb_assert (len <= sizeof (buf));
2239
2240 aarch64_debug_printf
2241 ("read HFA or HVA return value element %d from %s",
2242 i + 1, gdbarch_register_name (gdbarch, regno));
2243
2244 regs->cooked_read (regno, buf);
2245
2246 memcpy (valbuf, buf, len);
2247 valbuf += len;
2248 }
2249 }
2250 else if (type->code () == TYPE_CODE_INT
2251 || type->code () == TYPE_CODE_CHAR
2252 || type->code () == TYPE_CODE_BOOL
2253 || type->code () == TYPE_CODE_PTR
2254 || TYPE_IS_REFERENCE (type)
2255 || type->code () == TYPE_CODE_ENUM)
2256 {
2257 /* If the type is a plain integer, then the access is
2258 straightforward. Otherwise we have to play around a bit
2259 more. */
2260 int len = TYPE_LENGTH (type);
2261 int regno = AARCH64_X0_REGNUM;
2262 ULONGEST tmp;
2263
2264 while (len > 0)
2265 {
2266 /* By using store_unsigned_integer we avoid having to do
2267 anything special for small big-endian values. */
2268 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2269 store_unsigned_integer (valbuf,
2270 (len > X_REGISTER_SIZE
2271 ? X_REGISTER_SIZE : len), byte_order, tmp);
2272 len -= X_REGISTER_SIZE;
2273 valbuf += X_REGISTER_SIZE;
2274 }
2275 }
2276 else
2277 {
2278 /* For a structure or union the behaviour is as if the value had
2279 been stored to word-aligned memory and then loaded into
2280 registers with 64-bit load instruction(s). */
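/* E.g. on a little-endian target, a 12-byte struct comes back with
   bytes 0-7 in X0 and bytes 8-11 in the low half of X1; the top
   bytes of X1 are not meaningful. */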
2281 int len = TYPE_LENGTH (type);
2282 int regno = AARCH64_X0_REGNUM;
2283 bfd_byte buf[X_REGISTER_SIZE];
2284
2285 while (len > 0)
2286 {
2287 regs->cooked_read (regno++, buf);
2288 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2289 len -= X_REGISTER_SIZE;
2290 valbuf += X_REGISTER_SIZE;
2291 }
2292 }
2293 }
2294
2295
2296 /* Will a function return an aggregate type in memory or in a
2297 register? Return 0 if an aggregate type can be returned in a
2298 register, 1 if it must be returned in memory. */
2299
2300 static int
2301 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2302 {
2303 type = check_typedef (type);
2304 int elements;
2305 struct type *fundamental_type;
2306
2307 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2308 &fundamental_type))
2309 {
2310 /* V0-V7 are used to return values, with one register allocated per
2311 member; an HFA or HVA has at most four members, so it always fits. */
2312 return 0;
2313 }
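/* For instance, struct { double d[4]; } is an HFA of four doubles
   and is returned in D0-D3 despite being 32 bytes long. */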
2314
2315 if (TYPE_LENGTH (type) > 16)
2316 {
2317 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2318 invisible reference. */
2319
2320 return 1;
2321 }
2322
2323 return 0;
2324 }
2325
2326 /* Write into appropriate registers a function return value of type
2327 TYPE, given in virtual format. */
2328
2329 static void
2330 aarch64_store_return_value (struct type *type, struct regcache *regs,
2331 const gdb_byte *valbuf)
2332 {
2333 struct gdbarch *gdbarch = regs->arch ();
2334 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2335 int elements;
2336 struct type *fundamental_type;
2337
2338 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2339 &fundamental_type))
2340 {
2341 int len = TYPE_LENGTH (fundamental_type);
2342
2343 for (int i = 0; i < elements; i++)
2344 {
2345 int regno = AARCH64_V0_REGNUM + i;
2346 /* Enough space for a full vector register. */
2347 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2348 gdb_assert (len <= sizeof (tmpbuf));
2349
2350 aarch64_debug_printf
2351 ("write HFA or HVA return value element %d to %s",
2352 i + 1, gdbarch_register_name (gdbarch, regno));
2353
2354 memcpy (tmpbuf, valbuf,
2355 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2356 regs->cooked_write (regno, tmpbuf);
2357 valbuf += len;
2358 }
2359 }
2360 else if (type->code () == TYPE_CODE_INT
2361 || type->code () == TYPE_CODE_CHAR
2362 || type->code () == TYPE_CODE_BOOL
2363 || type->code () == TYPE_CODE_PTR
2364 || TYPE_IS_REFERENCE (type)
2365 || type->code () == TYPE_CODE_ENUM)
2366 {
2367 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2368 {
2369 /* Values of one word or less are zero/sign-extended and
2370 returned in X0. */
2371 bfd_byte tmpbuf[X_REGISTER_SIZE];
2372 LONGEST val = unpack_long (type, valbuf);
2373
2374 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2375 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2376 }
2377 else
2378 {
2379 /* Integral values greater than one word are stored in
2380 consecutive registers starting with X0. This will always
2381 be a multiple of the register size. */
2382 int len = TYPE_LENGTH (type);
2383 int regno = AARCH64_X0_REGNUM;
2384
2385 while (len > 0)
2386 {
2387 regs->cooked_write (regno++, valbuf);
2388 len -= X_REGISTER_SIZE;
2389 valbuf += X_REGISTER_SIZE;
2390 }
2391 }
2392 }
2393 else
2394 {
2395 /* For a structure or union the behaviour is as if the value had
2396 been stored to word-aligned memory and then loaded into
2397 registers with 64-bit load instruction(s). */
2398 int len = TYPE_LENGTH (type);
2399 int regno = AARCH64_X0_REGNUM;
2400 bfd_byte tmpbuf[X_REGISTER_SIZE];
2401
2402 while (len > 0)
2403 {
2404 memcpy (tmpbuf, valbuf,
2405 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2406 regs->cooked_write (regno++, tmpbuf);
2407 len -= X_REGISTER_SIZE;
2408 valbuf += X_REGISTER_SIZE;
2409 }
2410 }
2411 }
2412
2413 /* Implement the "return_value" gdbarch method. */
2414
2415 static enum return_value_convention
2416 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2417 struct type *valtype, struct regcache *regcache,
2418 gdb_byte *readbuf, const gdb_byte *writebuf)
2419 {
2420
2421 if (valtype->code () == TYPE_CODE_STRUCT
2422 || valtype->code () == TYPE_CODE_UNION
2423 || valtype->code () == TYPE_CODE_ARRAY)
2424 {
2425 if (aarch64_return_in_memory (gdbarch, valtype))
2426 {
2427 aarch64_debug_printf ("return value in memory");
2428 return RETURN_VALUE_STRUCT_CONVENTION;
2429 }
2430 }
2431
2432 if (writebuf)
2433 aarch64_store_return_value (valtype, regcache, writebuf);
2434
2435 if (readbuf)
2436 aarch64_extract_return_value (valtype, regcache, readbuf);
2437
2438 aarch64_debug_printf ("return value in registers");
2439
2440 return RETURN_VALUE_REGISTER_CONVENTION;
2441 }
2442
2443 /* Implement the "get_longjmp_target" gdbarch method. */
2444
2445 static int
2446 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2447 {
2448 CORE_ADDR jb_addr;
2449 gdb_byte buf[X_REGISTER_SIZE];
2450 struct gdbarch *gdbarch = get_frame_arch (frame);
2451 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2452 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2453
2454 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2455
2456 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2457 X_REGISTER_SIZE))
2458 return 0;
2459
2460 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2461 return 1;
2462 }
2463
2464 /* Implement the "gen_return_address" gdbarch method. */
2465
2466 static void
2467 aarch64_gen_return_address (struct gdbarch *gdbarch,
2468 struct agent_expr *ax, struct axs_value *value,
2469 CORE_ADDR scope)
2470 {
2471 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2472 value->kind = axs_lvalue_register;
2473 value->u.reg = AARCH64_LR_REGNUM;
2474 }
2475 \f
2476
2477 /* Return the pseudo register name corresponding to register regnum. */
2478
2479 static const char *
2480 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2481 {
2482 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2483
2484 static const char *const q_name[] =
2485 {
2486 "q0", "q1", "q2", "q3",
2487 "q4", "q5", "q6", "q7",
2488 "q8", "q9", "q10", "q11",
2489 "q12", "q13", "q14", "q15",
2490 "q16", "q17", "q18", "q19",
2491 "q20", "q21", "q22", "q23",
2492 "q24", "q25", "q26", "q27",
2493 "q28", "q29", "q30", "q31",
2494 };
2495
2496 static const char *const d_name[] =
2497 {
2498 "d0", "d1", "d2", "d3",
2499 "d4", "d5", "d6", "d7",
2500 "d8", "d9", "d10", "d11",
2501 "d12", "d13", "d14", "d15",
2502 "d16", "d17", "d18", "d19",
2503 "d20", "d21", "d22", "d23",
2504 "d24", "d25", "d26", "d27",
2505 "d28", "d29", "d30", "d31",
2506 };
2507
2508 static const char *const s_name[] =
2509 {
2510 "s0", "s1", "s2", "s3",
2511 "s4", "s5", "s6", "s7",
2512 "s8", "s9", "s10", "s11",
2513 "s12", "s13", "s14", "s15",
2514 "s16", "s17", "s18", "s19",
2515 "s20", "s21", "s22", "s23",
2516 "s24", "s25", "s26", "s27",
2517 "s28", "s29", "s30", "s31",
2518 };
2519
2520 static const char *const h_name[] =
2521 {
2522 "h0", "h1", "h2", "h3",
2523 "h4", "h5", "h6", "h7",
2524 "h8", "h9", "h10", "h11",
2525 "h12", "h13", "h14", "h15",
2526 "h16", "h17", "h18", "h19",
2527 "h20", "h21", "h22", "h23",
2528 "h24", "h25", "h26", "h27",
2529 "h28", "h29", "h30", "h31",
2530 };
2531
2532 static const char *const b_name[] =
2533 {
2534 "b0", "b1", "b2", "b3",
2535 "b4", "b5", "b6", "b7",
2536 "b8", "b9", "b10", "b11",
2537 "b12", "b13", "b14", "b15",
2538 "b16", "b17", "b18", "b19",
2539 "b20", "b21", "b22", "b23",
2540 "b24", "b25", "b26", "b27",
2541 "b28", "b29", "b30", "b31",
2542 };
2543
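/* Pseudo register numbers are allocated after the raw registers, so
   strip off the raw register count to index the pseudo space that
   AARCH64_Q0_REGNUM and friends are relative to. */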
2544 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2545
2546 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2547 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2548
2549 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2550 return d_name[p_regnum - AARCH64_D0_REGNUM];
2551
2552 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2553 return s_name[p_regnum - AARCH64_S0_REGNUM];
2554
2555 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2556 return h_name[p_regnum - AARCH64_H0_REGNUM];
2557
2558 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2559 return b_name[p_regnum - AARCH64_B0_REGNUM];
2560
2561 if (tdep->has_sve ())
2562 {
2563 static const char *const sve_v_name[] =
2564 {
2565 "v0", "v1", "v2", "v3",
2566 "v4", "v5", "v6", "v7",
2567 "v8", "v9", "v10", "v11",
2568 "v12", "v13", "v14", "v15",
2569 "v16", "v17", "v18", "v19",
2570 "v20", "v21", "v22", "v23",
2571 "v24", "v25", "v26", "v27",
2572 "v28", "v29", "v30", "v31",
2573 };
2574
2575 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2576 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2577 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2578 }
2579
2580 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2581 prevents it from being read by methods such as
2582 mi_cmd_trace_frame_collected. */
2583 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2584 return "";
2585
2586 internal_error (__FILE__, __LINE__,
2587 _("aarch64_pseudo_register_name: bad register number %d"),
2588 p_regnum);
2589 }
2590
2591 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2592
2593 static struct type *
2594 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2595 {
2596 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2597
2598 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2599
2600 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2601 return aarch64_vnq_type (gdbarch);
2602
2603 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2604 return aarch64_vnd_type (gdbarch);
2605
2606 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2607 return aarch64_vns_type (gdbarch);
2608
2609 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2610 return aarch64_vnh_type (gdbarch);
2611
2612 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2613 return aarch64_vnb_type (gdbarch);
2614
2615 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2616 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2617 return aarch64_vnv_type (gdbarch);
2618
2619 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2620 return builtin_type (gdbarch)->builtin_uint64;
2621
2622 internal_error (__FILE__, __LINE__,
2623 _("aarch64_pseudo_register_type: bad register number %d"),
2624 p_regnum);
2625 }
2626
2627 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2628
2629 static int
2630 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2631 struct reggroup *group)
2632 {
2633 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2634
2635 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2636
2637 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2638 return group == all_reggroup || group == vector_reggroup;
2639 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2640 return (group == all_reggroup || group == vector_reggroup
2641 || group == float_reggroup);
2642 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2643 return (group == all_reggroup || group == vector_reggroup
2644 || group == float_reggroup);
2645 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2646 return group == all_reggroup || group == vector_reggroup;
2647 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2648 return group == all_reggroup || group == vector_reggroup;
2649 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2650 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2651 return group == all_reggroup || group == vector_reggroup;
2652 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2653 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2654 return 0;
2655
2656 return group == all_reggroup;
2657 }
2658
2659 /* Helper for aarch64_pseudo_read_value. */
2660
2661 static struct value *
2662 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2663 readable_regcache *regcache, int regnum_offset,
2664 int regsize, struct value *result_value)
2665 {
2666 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2667
2668 /* Enough space for a full vector register. */
2669 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2670 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2671
2672 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2673 mark_value_bytes_unavailable (result_value, 0,
2674 TYPE_LENGTH (value_type (result_value)));
2675 else
2676 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2677
2678 return result_value;
2679 }
2680
2681 /* Implement the "pseudo_register_read_value" gdbarch method. */
2682
2683 static struct value *
2684 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2685 int regnum)
2686 {
2687 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2688 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2689
2690 VALUE_LVAL (result_value) = lval_register;
2691 VALUE_REGNUM (result_value) = regnum;
2692
2693 regnum -= gdbarch_num_regs (gdbarch);
2694
2695 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2696 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2697 regnum - AARCH64_Q0_REGNUM,
2698 Q_REGISTER_SIZE, result_value);
2699
2700 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2701 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2702 regnum - AARCH64_D0_REGNUM,
2703 D_REGISTER_SIZE, result_value);
2704
2705 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2706 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2707 regnum - AARCH64_S0_REGNUM,
2708 S_REGISTER_SIZE, result_value);
2709
2710 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2711 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2712 regnum - AARCH64_H0_REGNUM,
2713 H_REGISTER_SIZE, result_value);
2714
2715 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2716 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2717 regnum - AARCH64_B0_REGNUM,
2718 B_REGISTER_SIZE, result_value);
2719
2720 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2721 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2722 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2723 regnum - AARCH64_SVE_V0_REGNUM,
2724 V_REGISTER_SIZE, result_value);
2725
2726 gdb_assert_not_reached ("regnum out of bounds");
2727 }
2728
2729 /* Helper for aarch64_pseudo_write. */
2730
2731 static void
2732 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2733 int regnum_offset, int regsize, const gdb_byte *buf)
2734 {
2735 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2736
2737 /* Enough space for a full vector register. */
2738 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2739 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2740
2741 /* Ensure the register buffer is zero. We want GDB writes of the
2742 various 'scalar' pseudo registers to behave like architectural
2743 writes: register-width bytes are written and the remainder is set
2744 to zero. */
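/* E.g. writing 1.0 to D5 sets bits [63:0] of V5 and clears bits
   [127:64], just as an architectural "fmov d5, #1.0" would. */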
2745 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2746
2747 memcpy (reg_buf, buf, regsize);
2748 regcache->raw_write (v_regnum, reg_buf);
2749 }
2750
2751 /* Implement the "pseudo_register_write" gdbarch method. */
2752
2753 static void
2754 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2755 int regnum, const gdb_byte *buf)
2756 {
2757 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2758 regnum -= gdbarch_num_regs (gdbarch);
2759
2760 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2761 return aarch64_pseudo_write_1 (gdbarch, regcache,
2762 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2763 buf);
2764
2765 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2766 return aarch64_pseudo_write_1 (gdbarch, regcache,
2767 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2768 buf);
2769
2770 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2771 return aarch64_pseudo_write_1 (gdbarch, regcache,
2772 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2773 buf);
2774
2775 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2776 return aarch64_pseudo_write_1 (gdbarch, regcache,
2777 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2778 buf);
2779
2780 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2781 return aarch64_pseudo_write_1 (gdbarch, regcache,
2782 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2783 buf);
2784
2785 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2786 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2787 return aarch64_pseudo_write_1 (gdbarch, regcache,
2788 regnum - AARCH64_SVE_V0_REGNUM,
2789 V_REGISTER_SIZE, buf);
2790
2791 gdb_assert_not_reached ("regnum out of bounds");
2792 }
2793
2794 /* Callback function for user_reg_add. */
2795
2796 static struct value *
2797 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2798 {
2799 const int *reg_p = (const int *) baton;
2800
2801 return value_of_register (*reg_p, frame);
2802 }
2803 \f
2804
2805 /* Implement the "software_single_step" gdbarch method, needed to
2806 single step through atomic sequences on AArch64. */
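/* A sketch of the kind of sequence this must step over atomically:

     retry: ldaxr w1, [x0]         <- load exclusive opens the sequence
            cmp   w1, w3
            b.ne  done             <- optional conditional branch out
            stlxr w2, w4, [x0]     <- store exclusive closes it
            cbnz  w2, retry
     done:  ...

   A breakpoint is placed after the closing store exclusive and, if a
   conditional branch was seen, at its destination.  */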
2807
2808 static std::vector<CORE_ADDR>
2809 aarch64_software_single_step (struct regcache *regcache)
2810 {
2811 struct gdbarch *gdbarch = regcache->arch ();
2812 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2813 const int insn_size = 4;
2814 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2815 CORE_ADDR pc = regcache_read_pc (regcache);
2816 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2817 CORE_ADDR loc = pc;
2818 CORE_ADDR closing_insn = 0;
2819 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2820 byte_order_for_code);
2821 int index;
2822 int insn_count;
2823 int bc_insn_count = 0; /* Conditional branch instruction count. */
2824 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2825 aarch64_inst inst;
2826
2827 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2828 return {};
2829
2830 /* Look for a Load Exclusive instruction which begins the sequence. */
2831 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2832 return {};
2833
2834 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2835 {
2836 loc += insn_size;
2837 insn = read_memory_unsigned_integer (loc, insn_size,
2838 byte_order_for_code);
2839
2840 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2841 return {};
2842 /* Check if the instruction is a conditional branch. */
2843 if (inst.opcode->iclass == condbranch)
2844 {
2845 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2846
2847 if (bc_insn_count >= 1)
2848 return {};
2849
2850 /* It is, so we'll try to set a breakpoint at the destination. */
2851 breaks[1] = loc + inst.operands[0].imm.value;
2852
2853 bc_insn_count++;
2854 last_breakpoint++;
2855 }
2856
2857 /* Look for the Store Exclusive which closes the atomic sequence. */
2858 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2859 {
2860 closing_insn = loc;
2861 break;
2862 }
2863 }
2864
2865 /* We didn't find a closing Store Exclusive instruction; fall back. */
2866 if (!closing_insn)
2867 return {};
2868
2869 /* Insert breakpoint after the end of the atomic sequence. */
2870 breaks[0] = loc + insn_size;
2871
2872 /* Check for duplicated breakpoints, and also check that the second
2873 breakpoint is not within the atomic sequence. */
2874 if (last_breakpoint
2875 && (breaks[1] == breaks[0]
2876 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2877 last_breakpoint = 0;
2878
2879 std::vector<CORE_ADDR> next_pcs;
2880
2881 /* Insert the breakpoint at the end of the sequence, and one at the
2882 destination of the conditional branch, if it exists. */
2883 for (index = 0; index <= last_breakpoint; index++)
2884 next_pcs.push_back (breaks[index]);
2885
2886 return next_pcs;
2887 }
2888
2889 struct aarch64_displaced_step_copy_insn_closure
2890 : public displaced_step_copy_insn_closure
2891 {
2892 /* True when a conditional instruction, such as B.COND or TBZ,
2893 is being displaced stepped. */
2894 bool cond = false;
2895
2896 /* PC adjustment offset after displaced stepping. If 0, then we don't
2897 write the PC back, assuming the PC is already the right address. */
2898 int32_t pc_adjust = 0;
2899 };
2900
2901 /* Data when visiting instructions for displaced stepping. */
2902
2903 struct aarch64_displaced_step_data
2904 {
2905 struct aarch64_insn_data base;
2906
2907 /* The address at which the instruction will be executed. */
2908 CORE_ADDR new_addr;
2909 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2910 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2911 /* Number of instructions in INSN_BUF. */
2912 unsigned insn_count;
2913 /* Registers when doing displaced stepping. */
2914 struct regcache *regs;
2915
2916 aarch64_displaced_step_copy_insn_closure *dsc;
2917 };
2918
2919 /* Implementation of aarch64_insn_visitor method "b". */
2920
2921 static void
2922 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2923 struct aarch64_insn_data *data)
2924 {
2925 struct aarch64_displaced_step_data *dsd
2926 = (struct aarch64_displaced_step_data *) data;
2927 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2928
2929 if (can_encode_int32 (new_offset, 28))
2930 {
2931 /* Emit B rather than BL, because executing BL on a new address
2932 will get the wrong address into LR. In order to avoid this,
2933 we emit B, and update LR if the instruction is BL. */
2934 emit_b (dsd->insn_buf, 0, new_offset);
2935 dsd->insn_count++;
2936 }
2937 else
2938 {
2939 /* Write NOP. */
2940 emit_nop (dsd->insn_buf);
2941 dsd->insn_count++;
2942 dsd->dsc->pc_adjust = offset;
2943 }
2944
2945 if (is_bl)
2946 {
2947 /* Update LR. */
2948 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2949 data->insn_addr + 4);
2950 }
2951 }
2952
2953 /* Implementation of aarch64_insn_visitor method "b_cond". */
2954
2955 static void
2956 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2957 struct aarch64_insn_data *data)
2958 {
2959 struct aarch64_displaced_step_data *dsd
2960 = (struct aarch64_displaced_step_data *) data;
2961
2962 /* GDB has to fix up the PC after displaced stepping this instruction
2963 differently according to whether the condition is true or false.
2964 Instead of checking COND against the condition flags, we can use
2965 the following instructions, and GDB can tell how to fix up the PC
2966 from the resulting PC value.
2967
2968 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2969 INSN1 ;
2970 TAKEN:
2971 INSN2
2972 */
2973
2974 emit_bcond (dsd->insn_buf, cond, 8);
2975 dsd->dsc->cond = true;
2976 dsd->dsc->pc_adjust = offset;
2977 dsd->insn_count = 1;
2978 }
2979
2980 /* Construct an aarch64_register operand dynamically. If the register
2981 is known statically, it should be made a global as above instead of
2982 using this helper function. */
2983
2984 static struct aarch64_register
2985 aarch64_register (unsigned num, int is64)
2986 {
2987 return (struct aarch64_register) { num, is64 };
2988 }
2989
2990 /* Implementation of aarch64_insn_visitor method "cb". */
2991
2992 static void
2993 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2994 const unsigned rn, int is64,
2995 struct aarch64_insn_data *data)
2996 {
2997 struct aarch64_displaced_step_data *dsd
2998 = (struct aarch64_displaced_step_data *) data;
2999
3000 /* The offset is out of range for a compare and branch
3001 instruction. We can use the following instructions instead:
3002
3003 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3004 INSN1 ;
3005 TAKEN:
3006 INSN2
3007 */
3008 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3009 dsd->insn_count = 1;
3010 dsd->dsc->cond = true;
3011 dsd->dsc->pc_adjust = offset;
3012 }
3013
3014 /* Implementation of aarch64_insn_visitor method "tb". */
3015
3016 static void
3017 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3018 const unsigned rt, unsigned bit,
3019 struct aarch64_insn_data *data)
3020 {
3021 struct aarch64_displaced_step_data *dsd
3022 = (struct aarch64_displaced_step_data *) data;
3023
3024 /* The offset is out of range for a test bit and branch
3025 instruction. We can use the following instructions instead:
3026
3027 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3028 INSN1 ;
3029 TAKEN:
3030 INSN2
3031
3032 */
3033 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3034 dsd->insn_count = 1;
3035 dsd->dsc->cond = true;
3036 dsd->dsc->pc_adjust = offset;
3037 }
3038
3039 /* Implementation of aarch64_insn_visitor method "adr". */
3040
3041 static void
3042 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3043 const int is_adrp, struct aarch64_insn_data *data)
3044 {
3045 struct aarch64_displaced_step_data *dsd
3046 = (struct aarch64_displaced_step_data *) data;
3047 /* We know exactly the address the ADR{P,} instruction will compute.
3048 We can just write it to the destination register. */
3049 CORE_ADDR address = data->insn_addr + offset;
3050
3051 if (is_adrp)
3052 {
3053 /* Clear the lower 12 bits of the offset to get the 4K page. */
3054 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3055 address & ~0xfff);
3056 }
3057 else
3058 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3059 address);
3060
3061 dsd->dsc->pc_adjust = 4;
3062 emit_nop (dsd->insn_buf);
3063 dsd->insn_count = 1;
3064 }
3065
3066 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3067
3068 static void
3069 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3070 const unsigned rt, const int is64,
3071 struct aarch64_insn_data *data)
3072 {
3073 struct aarch64_displaced_step_data *dsd
3074 = (struct aarch64_displaced_step_data *) data;
3075 CORE_ADDR address = data->insn_addr + offset;
3076 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3077
3078 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3079 address);
3080
3081 if (is_sw)
3082 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3083 aarch64_register (rt, 1), zero);
3084 else
3085 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3086 aarch64_register (rt, 1), zero);
3087
3088 dsd->dsc->pc_adjust = 4;
3089 }
3090
3091 /* Implementation of aarch64_insn_visitor method "others". */
3092
3093 static void
3094 aarch64_displaced_step_others (const uint32_t insn,
3095 struct aarch64_insn_data *data)
3096 {
3097 struct aarch64_displaced_step_data *dsd
3098 = (struct aarch64_displaced_step_data *) data;
3099
3100 aarch64_emit_insn (dsd->insn_buf, insn);
3101 dsd->insn_count = 1;
3102
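/* The mask 0xfffffc1f clears the Rn field (bits 5 to 9), so this
   matches RET with any register operand, e.g. "ret" or "ret x1". */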
3103 if ((insn & 0xfffffc1f) == 0xd65f0000)
3104 {
3105 /* RET */
3106 dsd->dsc->pc_adjust = 0;
3107 }
3108 else
3109 dsd->dsc->pc_adjust = 4;
3110 }
3111
3112 static const struct aarch64_insn_visitor visitor =
3113 {
3114 aarch64_displaced_step_b,
3115 aarch64_displaced_step_b_cond,
3116 aarch64_displaced_step_cb,
3117 aarch64_displaced_step_tb,
3118 aarch64_displaced_step_adr,
3119 aarch64_displaced_step_ldr_literal,
3120 aarch64_displaced_step_others,
3121 };
3122
3123 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3124
3125 displaced_step_copy_insn_closure_up
3126 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3127 CORE_ADDR from, CORE_ADDR to,
3128 struct regcache *regs)
3129 {
3130 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3131 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3132 struct aarch64_displaced_step_data dsd;
3133 aarch64_inst inst;
3134
3135 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3136 return NULL;
3137
3138 /* Look for a Load Exclusive instruction which begins the sequence. */
3139 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3140 {
3141 /* We can't displaced-step atomic sequences. */
3142 return NULL;
3143 }
3144
3145 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3146 (new aarch64_displaced_step_copy_insn_closure);
3147 dsd.base.insn_addr = from;
3148 dsd.new_addr = to;
3149 dsd.regs = regs;
3150 dsd.dsc = dsc.get ();
3151 dsd.insn_count = 0;
3152 aarch64_relocate_instruction (insn, &visitor,
3153 (struct aarch64_insn_data *) &dsd);
3154 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3155
3156 if (dsd.insn_count != 0)
3157 {
3158 int i;
3159
3160 /* The instruction can be relocated to the scratch pad. Copy the
3161 relocated instruction(s) there. */
3162 for (i = 0; i < dsd.insn_count; i++)
3163 {
3164 displaced_debug_printf ("writing insn %.8x at %s",
3165 dsd.insn_buf[i],
3166 paddress (gdbarch, to + i * 4));
3167
3168 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3169 (ULONGEST) dsd.insn_buf[i]);
3170 }
3171 }
3172 else
3173 {
3174 dsc = NULL;
3175 }
3176
3177 /* This is a workaround for a problem with g++ 4.8. */
3178 return displaced_step_copy_insn_closure_up (dsc.release ());
3179 }
3180
3181 /* Implement the "displaced_step_fixup" gdbarch method. */
3182
3183 void
3184 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3185 struct displaced_step_copy_insn_closure *dsc_,
3186 CORE_ADDR from, CORE_ADDR to,
3187 struct regcache *regs)
3188 {
3189 aarch64_displaced_step_copy_insn_closure *dsc
3190 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3191
3192 ULONGEST pc;
3193
3194 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3195
3196 displaced_debug_printf ("PC after stepping: %s (was %s).",
3197 paddress (gdbarch, pc), paddress (gdbarch, to));
3198
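/* For a relocated conditional (see aarch64_displaced_step_b_cond and
   friends) the scratch pad starts with a B.COND/CBZ/TBZ whose target
   is 8 bytes ahead, so stopping at TO + 8 means the condition held
   and TO + 4 means it did not. */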
3199 if (dsc->cond)
3200 {
3201 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3202 dsc->pc_adjust);
3203
3204 if (pc - to == 8)
3205 {
3206 /* Condition is true. */
3207 }
3208 else if (pc - to == 4)
3209 {
3210 /* Condition is false. */
3211 dsc->pc_adjust = 4;
3212 }
3213 else
3214 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3215
3216 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3217 dsc->pc_adjust);
3218 }
3219
3220 displaced_debug_printf ("%s PC by %d",
3221 dsc->pc_adjust ? "adjusting" : "not adjusting",
3222 dsc->pc_adjust);
3223
3224 if (dsc->pc_adjust != 0)
3225 {
3226 /* Make sure the previous instruction was executed (that is, the PC
3227 has changed). If the PC didn't change, then discard the adjustment
3228 offset. Otherwise we may skip an instruction before its execution
3229 took place. */
3230 if ((pc - to) == 0)
3231 {
3232 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3233 dsc->pc_adjust = 0;
3234 }
3235
3236 displaced_debug_printf ("fixup: set PC to %s:%d",
3237 paddress (gdbarch, from), dsc->pc_adjust);
3238
3239 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3240 from + dsc->pc_adjust);
3241 }
3242 }
3243
3244 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3245
3246 bool
3247 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3248 {
3249 return true;
3250 }
3251
3252 /* Get the correct target description for the given VQ value.
3253 If VQ is zero then it is assumed SVE is not supported.
3254 (It is not possible to set VQ to zero on an SVE system). */
3255
3256 const target_desc *
3257 aarch64_read_description (uint64_t vq, bool pauth_p)
3258 {
3259 if (vq > AARCH64_MAX_SVE_VQ)
3260 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3261 AARCH64_MAX_SVE_VQ);
3262
3263 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3264
3265 if (tdesc == NULL)
3266 {
3267 tdesc = aarch64_create_target_description (vq, pauth_p);
3268 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3269 }
3270
3271 return tdesc;
3272 }
3273
3274 /* Return the VQ used when creating the target description TDESC. */
3275
3276 static uint64_t
3277 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3278 {
3279 const struct tdesc_feature *feature_sve;
3280
3281 if (!tdesc_has_registers (tdesc))
3282 return 0;
3283
3284 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3285
3286 if (feature_sve == nullptr)
3287 return 0;
3288
3289 uint64_t vl = tdesc_register_bitsize (feature_sve,
3290 aarch64_sve_register_names[0]) / 8;
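/* E.g. 512-bit Z registers give VL = 512 / 8 = 64 bytes, hence
   VQ = 64 / 16 = 4, the length in 128-bit quadwords. */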
3291 return sve_vq_from_vl (vl);
3292 }
3293
3294 /* Add all the expected register sets into GDBARCH. */
3295
3296 static void
3297 aarch64_add_reggroups (struct gdbarch *gdbarch)
3298 {
3299 reggroup_add (gdbarch, general_reggroup);
3300 reggroup_add (gdbarch, float_reggroup);
3301 reggroup_add (gdbarch, system_reggroup);
3302 reggroup_add (gdbarch, vector_reggroup);
3303 reggroup_add (gdbarch, all_reggroup);
3304 reggroup_add (gdbarch, save_reggroup);
3305 reggroup_add (gdbarch, restore_reggroup);
3306 }
3307
3308 /* Implement the "cannot_store_register" gdbarch method. */
3309
3310 static int
3311 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3312 {
3313 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3314
3315 if (!tdep->has_pauth ())
3316 return 0;
3317
3318 /* Pointer authentication registers are read-only. */
3319 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3320 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3321 }
3322
3323 /* Initialize the current architecture based on INFO. If possible,
3324 re-use an architecture from ARCHES, which is a list of
3325 architectures already created during this debugging session.
3326
3327 Called e.g. at program startup, when reading a core file, and when
3328 reading a binary file. */
3329
3330 static struct gdbarch *
3331 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3332 {
3333 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3334 const struct tdesc_feature *feature_pauth;
3335 bool valid_p = true;
3336 int i, num_regs = 0, num_pseudo_regs = 0;
3337 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3338
3339 /* Use the vector length passed via the target info. Here -1 is used for no
3340 SVE, and 0 is unset. If unset then use the vector length from the existing
3341 tdesc. */
3342 uint64_t vq = 0;
3343 if (info.id == (int *) -1)
3344 vq = 0;
3345 else if (info.id != 0)
3346 vq = (uint64_t) info.id;
3347 else
3348 vq = aarch64_get_tdesc_vq (info.target_desc);
3349
3350 if (vq > AARCH64_MAX_SVE_VQ)
3351 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3352 pulongest (vq), AARCH64_MAX_SVE_VQ);
3353
3354 /* If there is already a candidate, use it. */
3355 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3356 best_arch != nullptr;
3357 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3358 {
3359 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3360 if (tdep && tdep->vq == vq)
3361 return best_arch->gdbarch;
3362 }
3363
3364 /* Ensure we always have a target description, and that it is for the given VQ
3365 value. */
3366 const struct target_desc *tdesc = info.target_desc;
3367 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3368 tdesc = aarch64_read_description (vq, false);
3369 gdb_assert (tdesc);
3370
3371 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
3372 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3373 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3374 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3375
3376 if (feature_core == nullptr)
3377 return nullptr;
3378
3379 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3380
3381 /* Validate the description provides the mandatory core R registers
3382 and allocate their numbers. */
3383 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3384 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3385 AARCH64_X0_REGNUM + i,
3386 aarch64_r_register_names[i]);
3387
3388 num_regs = AARCH64_X0_REGNUM + i;
3389
3390 /* Add the V registers. */
3391 if (feature_fpu != nullptr)
3392 {
3393 if (feature_sve != nullptr)
3394 error (_("Program contains both fpu and SVE features."));
3395
3396 /* Validate the description provides the mandatory V registers
3397 and allocate their numbers. */
3398 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3399 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3400 AARCH64_V0_REGNUM + i,
3401 aarch64_v_register_names[i]);
3402
3403 num_regs = AARCH64_V0_REGNUM + i;
3404 }
3405
3406 /* Add the SVE registers. */
3407 if (feature_sve != nullptr)
3408 {
3409 /* Validate the description provides the mandatory SVE registers
3410 and allocate their numbers. */
3411 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3412 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3413 AARCH64_SVE_Z0_REGNUM + i,
3414 aarch64_sve_register_names[i]);
3415
3416 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3417 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3418 }
3419
3420 if (feature_fpu != nullptr || feature_sve != nullptr)
3421 {
3422 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3423 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3424 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3425 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3426 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3427 }
3428
3429 /* Add the pauth registers. */
3430 if (feature_pauth != NULL)
3431 {
3432 first_pauth_regnum = num_regs;
3433 pauth_ra_state_offset = num_pseudo_regs;
3434 /* Validate that the description provides the mandatory PAUTH registers
3435 and allocate their numbers. */
3436 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3437 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3438 first_pauth_regnum + i,
3439 aarch64_pauth_register_names[i]);
3440
3441 num_regs += i;
3442 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3443 }
3444
3445 if (!valid_p)
3446 return nullptr;
3447
3448 /* AArch64 code is always little-endian. */
3449 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3450
3451 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3452 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3453
3454 /* This should be low enough for everything. */
3455 tdep->lowest_pc = 0x20;
3456 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3457 tdep->jb_elt_size = 8;
3458 tdep->vq = vq;
3459 tdep->pauth_reg_base = first_pauth_regnum;
3460 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3461 : pauth_ra_state_offset + num_regs;
3462
3463 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3464 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3465
3466 /* Advance PC across function entry code. */
3467 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3468
3469 /* The stack grows downward. */
3470 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3471
3472 /* Breakpoint manipulation. */
3473 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3474 aarch64_breakpoint::kind_from_pc);
3475 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3476 aarch64_breakpoint::bp_from_kind);
3477 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3478 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3479
3480 /* Information about registers, etc. */
3481 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3482 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3483 set_gdbarch_num_regs (gdbarch, num_regs);
3484
3485 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3486 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3487 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3488 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3489 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3490 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3491 aarch64_pseudo_register_reggroup_p);
3492 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3493
3494 /* ABI */
3495 set_gdbarch_short_bit (gdbarch, 16);
3496 set_gdbarch_int_bit (gdbarch, 32);
3497 set_gdbarch_float_bit (gdbarch, 32);
3498 set_gdbarch_double_bit (gdbarch, 64);
3499 set_gdbarch_long_double_bit (gdbarch, 128);
3500 set_gdbarch_long_bit (gdbarch, 64);
3501 set_gdbarch_long_long_bit (gdbarch, 64);
3502 set_gdbarch_ptr_bit (gdbarch, 64);
3503 set_gdbarch_char_signed (gdbarch, 0);
3504 set_gdbarch_wchar_signed (gdbarch, 0);
3505 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3506 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3507 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3508 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3509
3510 /* Internal <-> external register number maps. */
3511 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3512
3513 /* Returning results. */
3514 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3515
3516 /* Disassembly. */
3517 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3518
3519 /* Virtual tables. */
3520 set_gdbarch_vbit_in_delta (gdbarch, 1);
3521
3522 /* Register architecture. */
3523 aarch64_add_reggroups (gdbarch);
3524
3525 /* Hook in the ABI-specific overrides, if they have been registered. */
3526 info.target_desc = tdesc;
3527 info.tdesc_data = tdesc_data.get ();
3528 gdbarch_init_osabi (info, gdbarch);
3529
3530 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3531 /* Register DWARF CFA vendor handler. */
3532 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3533 aarch64_execute_dwarf_cfa_vendor_op);
3534
3535 /* Permanent/Program breakpoint handling. */
3536 set_gdbarch_program_breakpoint_here_p (gdbarch,
3537 aarch64_program_breakpoint_here_p);
3538
3539 /* Add some default predicates. */
3540 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3541 dwarf2_append_unwinders (gdbarch);
3542 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3543
3544 frame_base_set_default (gdbarch, &aarch64_normal_base);
3545
3546 /* Now that we have tuned the configuration, set a few final things,
3547 based on what the OS ABI has told us. */
3548
3549 if (tdep->jb_pc >= 0)
3550 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3551
3552 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3553
3554 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3555
3556 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3557
3558 /* Add standard register aliases. */
3559 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3560 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3561 value_of_aarch64_user_reg,
3562 &aarch64_register_aliases[i].regnum);
3563
3564 register_aarch64_ravenscar_ops (gdbarch);
3565
3566 return gdbarch;
3567 }
3568
3569 static void
3570 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3571 {
3572 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3573
3574 if (tdep == NULL)
3575 return;
3576
3577 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3578 paddress (gdbarch, tdep->lowest_pc));
3579 }

#if GDB_SELF_TEST
namespace selftests
{
static void aarch64_process_record_test (void);
}
#endif

void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}

/* AArch64 process record-replay related structures, defines etc.  */

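/* Overview of the record/replay path in this file: aarch64_process_record
   reads the instruction at the given address and hands it to
   aarch64_record_decode_insn_handler, which dispatches on the major opcode
   bits to one of the aarch64_record_* handlers below.  Each handler notes
   the registers and memory ranges the instruction will clobber in local
   record_buf / record_buf_mem arrays and publishes them through the
   REG_ALLOC / MEM_ALLOC macros; the caller then copies them into the
   record-full lists and releases them with deallocate_reg_mem.  */
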
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
  do \
    { \
      unsigned int reg_len = LENGTH; \
      if (reg_len) \
	{ \
	  REGS = XNEWVEC (uint32_t, reg_len); \
	  memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
	} \
    } \
  while (0)

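/* MEM_ALLOC assumes RECORD_BUF holds LENGTH (len, addr) pairs laid out
   exactly like struct aarch64_mem_r, so a single memcpy starting at
   &MEMS->len copies the whole array of records at once.  */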
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
  do \
    { \
      unsigned int mem_len = LENGTH; \
      if (mem_len) \
	{ \
	  MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
	  memcpy (&MEMS->len, &RECORD_BUF[0], \
		  sizeof (struct aarch64_mem_r) * LENGTH); \
	} \
    } \
  while (0)

/* AArch64 record/replay structures and enumerations.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* The instruction was recorded.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Recognized, but not supported by record.  */
  AARCH64_RECORD_UNKNOWN	/* The encoding could not be decoded.  */
};

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
} insn_decode_record;
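
/* The aarch64_regs and aarch64_mems arrays are heap-allocated by the
   REG_ALLOC and MEM_ALLOC macros above; whoever fills in an
   insn_decode_record must release them with deallocate_reg_mem once the
   records have been consumed.  */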

/* Record handler for data processing - register instructions.  */
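/* Every encoding handled here writes at most the destination register Rd
   (insn bits [4:0]) and the condition flags, so recording Rd and/or CPSR
   is sufficient.  */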

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing - immediate instructions.  */
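/* The immediate forms likewise write only Rd, with the flag-setting
   add/subtract (immediate) and logical (immediate) encodings also
   updating CPSR.  */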

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00			/* PC rel addressing.  */
      || insn_bits24_27 == 0x03			/* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23))/* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for branch, exception generation and system instructions.  */
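/* Branches modify only the PC, plus LR for the branch-and-link forms;
   system instructions record either Rt (SYSL/MRS) or CPSR (HINT and
   MSR immediate), and SVC is delegated to the OS-specific syscall record
   hook with the syscall number read from register x8.  */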

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record Rt in case of SYSL and MRS instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record CPSR for HINT and MSR (immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for advanced SIMD load and store instructions.  */
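/* Bit 24 selects between the single structure forms (one lane of one to
   four registers, or the LD1R..LD4R replicating loads) and the multiple
   structure forms.  Loads produce register records, stores produce memory
   records sized by the element size, and writeback (bit 23) adds a record
   for the base register Rn.  */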

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02)
	       | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      switch (opcode_bits)
	{
	/* LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/* LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/* LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/* LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/* LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/* LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/* LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for load and store instructions.  */
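/* The decoder below distinguishes, in order: load/store exclusive, load
   (literal), load/store pair, load/store register (unsigned immediate),
   load/store register (register offset), and load/store register
   (immediate and unprivileged); anything left over is an Advanced SIMD
   load/store handled by the function above.  For loads the target
   register(s) are recorded, for stores the affected memory range is, and
   pre/post-indexed forms additionally record the base register.  */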

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate).  */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate).  */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
				      bits (aarch64_insn_r->aarch64_insn,
					    16, 20),
				      &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing SIMD and floating point instructions.  */
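/* Each branch below records exactly one destination (an X register, a V
   register, or CPSR for the compare forms), and the function then
   unconditionally appends FPSR, since many of these operations can set
   its cumulative status bits.  */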

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
	{
	  if (record_debug)
	    debug_printf ("FP - fixed point conversion");

	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
	    record_buf[0] = reg_rd;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
	{
	  if (record_debug)
	    debug_printf ("FP - conditional compare");

	  record_buf[0] = AARCH64_CPSR_REGNUM;
	}
      /* Floating point - data processing (2-source) and
	 conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
	{
	  if (record_debug)
	    debug_printf ("FP - DP (2-source)");

	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else if (insn_bits10_11 == 0x00)
	{
	  /* Floating point - immediate instructions.  */
	  if ((insn_bits12_15 & 0x01) == 0x01
	      || (insn_bits12_15 & 0x07) == 0x04)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	    }
	  /* Floating point - compare instructions.  */
	  else if ((insn_bits12_15 & 0x03) == 0x02)
	    {
	      if (record_debug)
		debug_printf ("FP - compare");
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	    }
	  /* Floating point - integer conversions instructions.  */
	  else if (insn_bits12_15 == 0x00)
	    {
	      /* Convert float to integer instruction.  */
	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
		{
		  if (record_debug)
		    debug_printf ("float to int conversion");

		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		}
	      /* Convert integer to float instruction.  */
	      else if ((opcode >> 1) == 0x01 && !rmode)
		{
		  if (record_debug)
		    debug_printf ("int to float conversion");

		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      /* Move float to integer instruction.  */
	      else if ((opcode >> 1) == 0x03)
		{
		  if (record_debug)
		    debug_printf ("move float to int");

		  if (!(opcode & 0x01))
		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		  else
		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
	debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
	  && !bit (aarch64_insn_r->aarch64_insn, 15)
	  && bit (aarch64_insn_r->aarch64_insn, 10))
	{
	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else
	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
	debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Record the V/X register.  */
  aarch64_insn_r->reg_rec_count++;

  /* Some of these instructions may set bits in the FPSR, so record it
     too.  */
  record_buf[1] = AARCH64_FPSR_REGNUM;
  aarch64_insn_r->reg_rec_count++;

  gdb_assert (aarch64_insn_r->reg_rec_count == 2);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode the instruction type and invoke the corresponding record
   handler.  */
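/* The dispatch below keys on the top-level opcode group defined by the
   A64 encoding map, read from insn bits 28..25:

     bits 28..25 = 100x  data processing (immediate)
     bits 28..25 = 101x  branches, exception generation, system
     bits 28..25 = x1x0  loads and stores
     bits 28..25 = x101  data processing (register)
     bits 28..25 = x111  data processing (SIMD and floating point)  */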

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Cleans up local record registers and memory allocations.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1]  */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Parse the current instruction and record the values of the registers
   and memory that will be changed by it to record_arch_list.  Return -1
   if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
			CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
					   insn_size,
					   gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%0x at address %s.\n"),
			 aarch64_record.aarch64_insn,
			 paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
				     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
				     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
	for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
	  if (record_full_arch_list_add_reg (aarch64_record.regcache,
					     aarch64_record.aarch64_regs[rec_no]))
	    ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
	for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
	  if (record_full_arch_list_add_mem
	      ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
	       aarch64_record.aarch64_mems[rec_no].len))
	    ret = -1;

      if (record_full_arch_list_add_end ())
	ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}