Pass return_method to _push_dummy_call
gdb/aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "selftest.h"
47
48 #include "aarch64-tdep.h"
49 #include "aarch64-ravenscar-thread.h"
50
51 #include "elf-bfd.h"
52 #include "elf/aarch64.h"
53
54 #include "vec.h"
55
56 #include "record.h"
57 #include "record-full.h"
58 #include "arch/aarch64-insn.h"
59
60 #include "opcode/aarch64.h"
61 #include <algorithm>
62
63 #define submask(x) ((1L << ((x) + 1)) - 1)
64 #define bit(obj,st) (((obj) >> (st)) & 1)
65 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
66
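/* Editor's illustration (not part of the original source): applying the
   helpers above to a concrete instruction word.  In the AArch64 ADD
   (immediate) encoding the 12-bit immediate occupies bits [21:10], so
   for 0x910083ff ("add sp, sp, #0x20") the field decodes like this.  */

static inline uint32_t
example_add_imm12 (uint32_t insn)
{
  /* bits (0x910083ff, 10, 21) == (insn >> 10) & 0xfff == 0x20.  */
  return bits (insn, 10, 21);
}
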
67 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
68 four members. */
69 #define HA_MAX_NUM_FLDS 4
70
71 /* All possible aarch64 target descriptors. */
72 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
73
74 /* The standard register names, and all the valid aliases for them. */
75 static const struct
76 {
77 const char *const name;
78 int regnum;
79 } aarch64_register_aliases[] =
80 {
81 /* 64-bit register names. */
82 {"fp", AARCH64_FP_REGNUM},
83 {"lr", AARCH64_LR_REGNUM},
84 {"sp", AARCH64_SP_REGNUM},
85
86 /* 32-bit register names. */
87 {"w0", AARCH64_X0_REGNUM + 0},
88 {"w1", AARCH64_X0_REGNUM + 1},
89 {"w2", AARCH64_X0_REGNUM + 2},
90 {"w3", AARCH64_X0_REGNUM + 3},
91 {"w4", AARCH64_X0_REGNUM + 4},
92 {"w5", AARCH64_X0_REGNUM + 5},
93 {"w6", AARCH64_X0_REGNUM + 6},
94 {"w7", AARCH64_X0_REGNUM + 7},
95 {"w8", AARCH64_X0_REGNUM + 8},
96 {"w9", AARCH64_X0_REGNUM + 9},
97 {"w10", AARCH64_X0_REGNUM + 10},
98 {"w11", AARCH64_X0_REGNUM + 11},
99 {"w12", AARCH64_X0_REGNUM + 12},
100 {"w13", AARCH64_X0_REGNUM + 13},
101 {"w14", AARCH64_X0_REGNUM + 14},
102 {"w15", AARCH64_X0_REGNUM + 15},
103 {"w16", AARCH64_X0_REGNUM + 16},
104 {"w17", AARCH64_X0_REGNUM + 17},
105 {"w18", AARCH64_X0_REGNUM + 18},
106 {"w19", AARCH64_X0_REGNUM + 19},
107 {"w20", AARCH64_X0_REGNUM + 20},
108 {"w21", AARCH64_X0_REGNUM + 21},
109 {"w22", AARCH64_X0_REGNUM + 22},
110 {"w23", AARCH64_X0_REGNUM + 23},
111 {"w24", AARCH64_X0_REGNUM + 24},
112 {"w25", AARCH64_X0_REGNUM + 25},
113 {"w26", AARCH64_X0_REGNUM + 26},
114 {"w27", AARCH64_X0_REGNUM + 27},
115 {"w28", AARCH64_X0_REGNUM + 28},
116 {"w29", AARCH64_X0_REGNUM + 29},
117 {"w30", AARCH64_X0_REGNUM + 30},
118
119 /* specials */
120 {"ip0", AARCH64_X0_REGNUM + 16},
121 {"ip1", AARCH64_X0_REGNUM + 17}
122 };
123
124 /* The required core 'R' registers. */
125 static const char *const aarch64_r_register_names[] =
126 {
127 /* These registers must appear in consecutive RAW register number
128 order and they must begin with AARCH64_X0_REGNUM! */
129 "x0", "x1", "x2", "x3",
130 "x4", "x5", "x6", "x7",
131 "x8", "x9", "x10", "x11",
132 "x12", "x13", "x14", "x15",
133 "x16", "x17", "x18", "x19",
134 "x20", "x21", "x22", "x23",
135 "x24", "x25", "x26", "x27",
136 "x28", "x29", "x30", "sp",
137 "pc", "cpsr"
138 };
139
140 /* The FP/SIMD 'V' registers. */
141 static const char *const aarch64_v_register_names[] =
142 {
143 /* These registers must appear in consecutive RAW register number
144 order and they must begin with AARCH64_V0_REGNUM! */
145 "v0", "v1", "v2", "v3",
146 "v4", "v5", "v6", "v7",
147 "v8", "v9", "v10", "v11",
148 "v12", "v13", "v14", "v15",
149 "v16", "v17", "v18", "v19",
150 "v20", "v21", "v22", "v23",
151 "v24", "v25", "v26", "v27",
152 "v28", "v29", "v30", "v31",
153 "fpsr",
154 "fpcr"
155 };
156
157 /* The SVE 'Z' and 'P' registers. */
158 static const char *const aarch64_sve_register_names[] =
159 {
160 /* These registers must appear in consecutive RAW register number
161 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
162 "z0", "z1", "z2", "z3",
163 "z4", "z5", "z6", "z7",
164 "z8", "z9", "z10", "z11",
165 "z12", "z13", "z14", "z15",
166 "z16", "z17", "z18", "z19",
167 "z20", "z21", "z22", "z23",
168 "z24", "z25", "z26", "z27",
169 "z28", "z29", "z30", "z31",
170 "fpsr", "fpcr",
171 "p0", "p1", "p2", "p3",
172 "p4", "p5", "p6", "p7",
173 "p8", "p9", "p10", "p11",
174 "p12", "p13", "p14", "p15",
175 "ffr", "vg"
176 };
177
178 /* AArch64 prologue cache structure. */
179 struct aarch64_prologue_cache
180 {
181 /* The program counter at the start of the function. It is used to
182 identify this frame as a prologue frame. */
183 CORE_ADDR func;
184
185 /* The program counter at the time this frame was created; i.e. where
186 this function was called from. It is used to identify this frame as a
187 stub frame. */
188 CORE_ADDR prev_pc;
189
190 /* The stack pointer at the time this frame was created; i.e. the
191 caller's stack pointer when this function was called. It is used
192 to identify this frame. */
193 CORE_ADDR prev_sp;
194
195 /* Is the target available to read from? */
196 int available_p;
197
198 /* The frame base for this frame is just prev_sp - frame size.
199 FRAMESIZE is the distance from the frame pointer to the
200 initial stack pointer. */
201 int framesize;
202
203 /* The register used to hold the frame pointer for this frame. */
204 int framereg;
205
206 /* Saved register offsets. */
207 struct trad_frame_saved_reg *saved_regs;
208 };
209
210 static void
211 show_aarch64_debug (struct ui_file *file, int from_tty,
212 struct cmd_list_element *c, const char *value)
213 {
214 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
215 }
216
217 namespace {
218
219 /* Abstract instruction reader. */
220
221 class abstract_instruction_reader
222 {
223 public:
224 /* Read in one instruction. */
225 virtual ULONGEST read (CORE_ADDR memaddr, int len,
226 enum bfd_endian byte_order) = 0;
227 };
228
229 /* Instruction reader from real target. */
230
231 class instruction_reader : public abstract_instruction_reader
232 {
233 public:
234 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
235 override
236 {
237 return read_code_unsigned_integer (memaddr, len, byte_order);
238 }
239 };
240
241 } // namespace
242
243 /* Analyze a prologue, looking for a recognizable stack frame
244 and frame pointer. Scan until we encounter a store that could
245 clobber the stack frame unexpectedly, or an unknown instruction. */
246
247 static CORE_ADDR
248 aarch64_analyze_prologue (struct gdbarch *gdbarch,
249 CORE_ADDR start, CORE_ADDR limit,
250 struct aarch64_prologue_cache *cache,
251 abstract_instruction_reader& reader)
252 {
253 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
254 int i;
255 /* Track X registers and D registers in prologue. */
256 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
257
258 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
259 regs[i] = pv_register (i, 0);
260 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
261
262 for (; start < limit; start += 4)
263 {
264 uint32_t insn;
265 aarch64_inst inst;
266
267 insn = reader.read (start, 4, byte_order_for_code);
268
269 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
270 break;
271
272 if (inst.opcode->iclass == addsub_imm
273 && (inst.opcode->op == OP_ADD
274 || strcmp ("sub", inst.opcode->name) == 0))
275 {
276 unsigned rd = inst.operands[0].reg.regno;
277 unsigned rn = inst.operands[1].reg.regno;
278
279 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
280 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
281 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
282 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
283
284 if (inst.opcode->op == OP_ADD)
285 {
286 regs[rd] = pv_add_constant (regs[rn],
287 inst.operands[2].imm.value);
288 }
289 else
290 {
291 regs[rd] = pv_add_constant (regs[rn],
292 -inst.operands[2].imm.value);
293 }
294 }
295 else if (inst.opcode->iclass == pcreladdr
296 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
297 {
298 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
299 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
300
301 regs[inst.operands[0].reg.regno] = pv_unknown ();
302 }
303 else if (inst.opcode->iclass == branch_imm)
304 {
305 /* Stop analysis on branch. */
306 break;
307 }
308 else if (inst.opcode->iclass == condbranch)
309 {
310 /* Stop analysis on branch. */
311 break;
312 }
313 else if (inst.opcode->iclass == branch_reg)
314 {
315 /* Stop analysis on branch. */
316 break;
317 }
318 else if (inst.opcode->iclass == compbranch)
319 {
320 /* Stop analysis on branch. */
321 break;
322 }
323 else if (inst.opcode->op == OP_MOVZ)
324 {
325 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
326 regs[inst.operands[0].reg.regno] = pv_unknown ();
327 }
328 else if (inst.opcode->iclass == log_shift
329 && strcmp (inst.opcode->name, "orr") == 0)
330 {
331 unsigned rd = inst.operands[0].reg.regno;
332 unsigned rn = inst.operands[1].reg.regno;
333 unsigned rm = inst.operands[2].reg.regno;
334
335 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
336 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
337 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
338
339 if (inst.operands[2].shifter.amount == 0
340 && rn == AARCH64_SP_REGNUM)
341 regs[rd] = regs[rm];
342 else
343 {
344 if (aarch64_debug)
345 {
346 debug_printf ("aarch64: prologue analysis gave up "
347 "addr=%s opcode=0x%x (orr x register)\n",
348 core_addr_to_string_nz (start), insn);
349 }
350 break;
351 }
352 }
353 else if (inst.opcode->op == OP_STUR)
354 {
355 unsigned rt = inst.operands[0].reg.regno;
356 unsigned rn = inst.operands[1].addr.base_regno;
357 int is64
358 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
359
360 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
361 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
362 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
363 gdb_assert (!inst.operands[1].addr.offset.is_reg);
364
365 stack.store (pv_add_constant (regs[rn],
366 inst.operands[1].addr.offset.imm),
367 is64 ? 8 : 4, regs[rt]);
368 }
369 else if ((inst.opcode->iclass == ldstpair_off
370 || (inst.opcode->iclass == ldstpair_indexed
371 && inst.operands[2].addr.preind))
372 && strcmp ("stp", inst.opcode->name) == 0)
373 {
374 /* STP with addressing mode Pre-indexed and Base register. */
375 unsigned rt1;
376 unsigned rt2;
377 unsigned rn = inst.operands[2].addr.base_regno;
378 int32_t imm = inst.operands[2].addr.offset.imm;
379
380 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
381 || inst.operands[0].type == AARCH64_OPND_Ft);
382 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
383 || inst.operands[1].type == AARCH64_OPND_Ft2);
384 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
385 gdb_assert (!inst.operands[2].addr.offset.is_reg);
386
387 /* If recording this store would invalidate the store area
388 (perhaps because rn is not known) then we should abandon
389 further prologue analysis. */
390 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
391 break;
392
393 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
394 break;
395
396 rt1 = inst.operands[0].reg.regno;
397 rt2 = inst.operands[1].reg.regno;
398 if (inst.operands[0].type == AARCH64_OPND_Ft)
399 {
400 /* Only the bottom 64 bits of each V register (the D register)
401 need to be preserved. */
402 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
403 rt1 += AARCH64_X_REGISTER_COUNT;
404 rt2 += AARCH64_X_REGISTER_COUNT;
405 }
406
407 stack.store (pv_add_constant (regs[rn], imm), 8,
408 regs[rt1]);
409 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
410 regs[rt2]);
411
412 if (inst.operands[2].addr.writeback)
413 regs[rn] = pv_add_constant (regs[rn], imm);
414
415 }
416 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
417 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
418 && (inst.opcode->op == OP_STR_POS
419 || inst.opcode->op == OP_STRF_POS)))
420 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
421 && strcmp ("str", inst.opcode->name) == 0)
422 {
423 /* STR (immediate) */
424 unsigned int rt = inst.operands[0].reg.regno;
425 int32_t imm = inst.operands[1].addr.offset.imm;
426 unsigned int rn = inst.operands[1].addr.base_regno;
427 bool is64
428 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
429 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
430 || inst.operands[0].type == AARCH64_OPND_Ft);
431
432 if (inst.operands[0].type == AARCH64_OPND_Ft)
433 {
434 /* Only the bottom 64 bits of each V register (the D register)
435 need to be preserved. */
436 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
437 rt += AARCH64_X_REGISTER_COUNT;
438 }
439
440 stack.store (pv_add_constant (regs[rn], imm),
441 is64 ? 8 : 4, regs[rt]);
442 if (inst.operands[1].addr.writeback)
443 regs[rn] = pv_add_constant (regs[rn], imm);
444 }
445 else if (inst.opcode->iclass == testbranch)
446 {
447 /* Stop analysis on branch. */
448 break;
449 }
450 else
451 {
452 if (aarch64_debug)
453 {
454 debug_printf ("aarch64: prologue analysis gave up addr=%s"
455 " opcode=0x%x\n",
456 core_addr_to_string_nz (start), insn);
457 }
458 break;
459 }
460 }
461
462 if (cache == NULL)
463 return start;
464
465 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
466 {
467 /* Frame pointer is fp. Frame size is constant. */
468 cache->framereg = AARCH64_FP_REGNUM;
469 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
470 }
471 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
472 {
473 /* Try the stack pointer. */
474 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
475 cache->framereg = AARCH64_SP_REGNUM;
476 }
477 else
478 {
479 /* We're just out of luck. We don't know where the frame is. */
480 cache->framereg = -1;
481 cache->framesize = 0;
482 }
483
484 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
485 {
486 CORE_ADDR offset;
487
488 if (stack.find_reg (gdbarch, i, &offset))
489 cache->saved_regs[i].addr = offset;
490 }
491
492 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
493 {
494 int regnum = gdbarch_num_regs (gdbarch);
495 CORE_ADDR offset;
496
497 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
498 &offset))
499 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
500 }
501
502 return start;
503 }
504
505 static CORE_ADDR
506 aarch64_analyze_prologue (struct gdbarch *gdbarch,
507 CORE_ADDR start, CORE_ADDR limit,
508 struct aarch64_prologue_cache *cache)
509 {
510 instruction_reader reader;
511
512 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
513 reader);
514 }
515
516 #if GDB_SELF_TEST
517
518 namespace selftests {
519
520 /* Instruction reader from manually cooked instruction sequences. */
521
522 class instruction_reader_test : public abstract_instruction_reader
523 {
524 public:
525 template<size_t SIZE>
526 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
527 : m_insns (insns), m_insns_size (SIZE)
528 {}
529
530 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
531 override
532 {
533 SELF_CHECK (len == 4);
534 SELF_CHECK (memaddr % 4 == 0);
535 SELF_CHECK (memaddr / 4 < m_insns_size);
536
537 return m_insns[memaddr / 4];
538 }
539
540 private:
541 const uint32_t *m_insns;
542 size_t m_insns_size;
543 };
544
545 static void
546 aarch64_analyze_prologue_test (void)
547 {
548 struct gdbarch_info info;
549
550 gdbarch_info_init (&info);
551 info.bfd_arch_info = bfd_scan_arch ("aarch64");
552
553 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
554 SELF_CHECK (gdbarch != NULL);
555
556 /* Test the simple prologue in which frame pointer is used. */
557 {
558 struct aarch64_prologue_cache cache;
559 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
560
561 static const uint32_t insns[] = {
562 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
563 0x910003fd, /* mov x29, sp */
564 0x97ffffe6, /* bl 0x400580 */
565 };
566 instruction_reader_test reader (insns);
567
568 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
569 SELF_CHECK (end == 4 * 2);
570
571 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
572 SELF_CHECK (cache.framesize == 272);
573
574 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
575 {
576 if (i == AARCH64_FP_REGNUM)
577 SELF_CHECK (cache.saved_regs[i].addr == -272);
578 else if (i == AARCH64_LR_REGNUM)
579 SELF_CHECK (cache.saved_regs[i].addr == -264);
580 else
581 SELF_CHECK (cache.saved_regs[i].addr == -1);
582 }
583
584 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
585 {
586 int regnum = gdbarch_num_regs (gdbarch);
587
588 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
589 == -1);
590 }
591 }
592
593 /* Test a prologue in which STR is used and frame pointer is not
594 used. */
595 {
596 struct aarch64_prologue_cache cache;
597 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
598
599 static const uint32_t insns[] = {
600 0xf81d0ff3, /* str x19, [sp, #-48]! */
601 0xb9002fe0, /* str w0, [sp, #44] */
602 0xf90013e1, /* str x1, [sp, #32] */
603 0xfd000fe0, /* str d0, [sp, #24] */
604 0xaa0203f3, /* mov x19, x2 */
605 0xf94013e0, /* ldr x0, [sp, #32] */
606 };
607 instruction_reader_test reader (insns);
608
609 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
610
611 SELF_CHECK (end == 4 * 5);
612
613 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
614 SELF_CHECK (cache.framesize == 48);
615
616 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
617 {
618 if (i == 1)
619 SELF_CHECK (cache.saved_regs[i].addr == -16);
620 else if (i == 19)
621 SELF_CHECK (cache.saved_regs[i].addr == -48);
622 else
623 SELF_CHECK (cache.saved_regs[i].addr == -1);
624 }
625
626 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
627 {
628 int regnum = gdbarch_num_regs (gdbarch);
629
630 if (i == 0)
631 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
632 == -24);
633 else
634 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
635 == -1);
636 }
637 }
638 }
639 } // namespace selftests
640 #endif /* GDB_SELF_TEST */
641
642 /* Implement the "skip_prologue" gdbarch method. */
643
644 static CORE_ADDR
645 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
646 {
647 CORE_ADDR func_addr, limit_pc;
648
649 /* See if we can determine the end of the prologue via the symbol
650 table. If so, then return either PC, or the PC after the
651 prologue, whichever is greater. */
652 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
653 {
654 CORE_ADDR post_prologue_pc
655 = skip_prologue_using_sal (gdbarch, func_addr);
656
657 if (post_prologue_pc != 0)
658 return std::max (pc, post_prologue_pc);
659 }
660
661 /* Can't determine prologue from the symbol table, need to examine
662 instructions. */
663
664 /* Find an upper limit on the function prologue using the debug
665 information. If the debug information could not be used to
666 provide that bound, then use an arbitrary large number as the
667 upper bound. */
668 limit_pc = skip_prologue_using_sal (gdbarch, pc);
669 if (limit_pc == 0)
670 limit_pc = pc + 128; /* Magic. */
671
672 /* Try disassembling prologue. */
673 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
674 }
675
676 /* Scan the function prologue for THIS_FRAME and populate the prologue
677 cache CACHE. */
678
679 static void
680 aarch64_scan_prologue (struct frame_info *this_frame,
681 struct aarch64_prologue_cache *cache)
682 {
683 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
684 CORE_ADDR prologue_start;
685 CORE_ADDR prologue_end;
686 CORE_ADDR prev_pc = get_frame_pc (this_frame);
687 struct gdbarch *gdbarch = get_frame_arch (this_frame);
688
689 cache->prev_pc = prev_pc;
690
691 /* Assume we do not find a frame. */
692 cache->framereg = -1;
693 cache->framesize = 0;
694
695 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
696 &prologue_end))
697 {
698 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
699
700 if (sal.line == 0)
701 {
702 /* No line info so use the current PC. */
703 prologue_end = prev_pc;
704 }
705 else if (sal.end < prologue_end)
706 {
707 /* The next line begins after the function end. */
708 prologue_end = sal.end;
709 }
710
711 prologue_end = std::min (prologue_end, prev_pc);
712 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
713 }
714 else
715 {
716 CORE_ADDR frame_loc;
717
718 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
719 if (frame_loc == 0)
720 return;
721
722 cache->framereg = AARCH64_FP_REGNUM;
723 cache->framesize = 16;
724 cache->saved_regs[29].addr = 0;
725 cache->saved_regs[30].addr = 8;
726 }
727 }
728
729 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
730 function may throw an exception if the inferior's registers or memory is
731 not available. */
732
733 static void
734 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
735 struct aarch64_prologue_cache *cache)
736 {
737 CORE_ADDR unwound_fp;
738 int reg;
739
740 aarch64_scan_prologue (this_frame, cache);
741
742 if (cache->framereg == -1)
743 return;
744
745 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
746 if (unwound_fp == 0)
747 return;
748
749 cache->prev_sp = unwound_fp + cache->framesize;
750
751 /* Calculate actual addresses of saved registers using offsets
752 determined by aarch64_analyze_prologue. */
753 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
754 if (trad_frame_addr_p (cache->saved_regs, reg))
755 cache->saved_regs[reg].addr += cache->prev_sp;
756
757 cache->func = get_frame_func (this_frame);
758
759 cache->available_p = 1;
760 }
761
762 /* Allocate and fill in *THIS_CACHE with information about the prologue of
763 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
764 Return a pointer to the current aarch64_prologue_cache in
765 *THIS_CACHE. */
766
767 static struct aarch64_prologue_cache *
768 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
769 {
770 struct aarch64_prologue_cache *cache;
771
772 if (*this_cache != NULL)
773 return (struct aarch64_prologue_cache *) *this_cache;
774
775 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
776 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
777 *this_cache = cache;
778
779 TRY
780 {
781 aarch64_make_prologue_cache_1 (this_frame, cache);
782 }
783 CATCH (ex, RETURN_MASK_ERROR)
784 {
785 if (ex.error != NOT_AVAILABLE_ERROR)
786 throw_exception (ex);
787 }
788 END_CATCH
789
790 return cache;
791 }
792
793 /* Implement the "stop_reason" frame_unwind method. */
794
795 static enum unwind_stop_reason
796 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
797 void **this_cache)
798 {
799 struct aarch64_prologue_cache *cache
800 = aarch64_make_prologue_cache (this_frame, this_cache);
801
802 if (!cache->available_p)
803 return UNWIND_UNAVAILABLE;
804
805 /* Halt the backtrace at "_start". */
806 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
807 return UNWIND_OUTERMOST;
808
809 /* We've hit a wall, stop. */
810 if (cache->prev_sp == 0)
811 return UNWIND_OUTERMOST;
812
813 return UNWIND_NO_REASON;
814 }
815
816 /* Our frame ID for a normal frame is the current function's starting
817 PC and the caller's SP when we were called. */
818
819 static void
820 aarch64_prologue_this_id (struct frame_info *this_frame,
821 void **this_cache, struct frame_id *this_id)
822 {
823 struct aarch64_prologue_cache *cache
824 = aarch64_make_prologue_cache (this_frame, this_cache);
825
826 if (!cache->available_p)
827 *this_id = frame_id_build_unavailable_stack (cache->func);
828 else
829 *this_id = frame_id_build (cache->prev_sp, cache->func);
830 }
831
832 /* Implement the "prev_register" frame_unwind method. */
833
834 static struct value *
835 aarch64_prologue_prev_register (struct frame_info *this_frame,
836 void **this_cache, int prev_regnum)
837 {
838 struct aarch64_prologue_cache *cache
839 = aarch64_make_prologue_cache (this_frame, this_cache);
840
841 /* If we are asked to unwind the PC, then we need to return the LR
842 instead. The prologue may save PC, but it will point into this
843 frame's prologue, not the next frame's resume location. */
844 if (prev_regnum == AARCH64_PC_REGNUM)
845 {
846 CORE_ADDR lr;
847
848 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
849 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
850 }
851
852 /* SP is generally not saved to the stack, but this frame is
853 identified by the next frame's stack pointer at the time of the
854 call. The value was already reconstructed into PREV_SP. */
855 /*
856 +----------+ ^
857 | saved lr | |
858 +->| saved fp |--+
859 | | |
860 | | | <- Previous SP
861 | +----------+
862 | | saved lr |
863 +--| saved fp |<- FP
864 | |
865 | |<- SP
866 +----------+ */
867 if (prev_regnum == AARCH64_SP_REGNUM)
868 return frame_unwind_got_constant (this_frame, prev_regnum,
869 cache->prev_sp);
870
871 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
872 prev_regnum);
873 }
874
875 /* AArch64 prologue unwinder. */
876 struct frame_unwind aarch64_prologue_unwind =
877 {
878 NORMAL_FRAME,
879 aarch64_prologue_frame_unwind_stop_reason,
880 aarch64_prologue_this_id,
881 aarch64_prologue_prev_register,
882 NULL,
883 default_frame_sniffer
884 };
885
886 /* Allocate and fill in *THIS_CACHE with information about the prologue of
887 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
888 Return a pointer to the current aarch64_prologue_cache in
889 *THIS_CACHE. */
890
891 static struct aarch64_prologue_cache *
892 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
893 {
894 struct aarch64_prologue_cache *cache;
895
896 if (*this_cache != NULL)
897 return (struct aarch64_prologue_cache *) *this_cache;
898
899 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
900 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
901 *this_cache = cache;
902
903 TRY
904 {
905 cache->prev_sp = get_frame_register_unsigned (this_frame,
906 AARCH64_SP_REGNUM);
907 cache->prev_pc = get_frame_pc (this_frame);
908 cache->available_p = 1;
909 }
910 CATCH (ex, RETURN_MASK_ERROR)
911 {
912 if (ex.error != NOT_AVAILABLE_ERROR)
913 throw_exception (ex);
914 }
915 END_CATCH
916
917 return cache;
918 }
919
920 /* Implement the "stop_reason" frame_unwind method. */
921
922 static enum unwind_stop_reason
923 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
924 void **this_cache)
925 {
926 struct aarch64_prologue_cache *cache
927 = aarch64_make_stub_cache (this_frame, this_cache);
928
929 if (!cache->available_p)
930 return UNWIND_UNAVAILABLE;
931
932 return UNWIND_NO_REASON;
933 }
934
935 /* Our frame ID for a stub frame is the current SP and LR. */
936
937 static void
938 aarch64_stub_this_id (struct frame_info *this_frame,
939 void **this_cache, struct frame_id *this_id)
940 {
941 struct aarch64_prologue_cache *cache
942 = aarch64_make_stub_cache (this_frame, this_cache);
943
944 if (cache->available_p)
945 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
946 else
947 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
948 }
949
950 /* Implement the "sniffer" frame_unwind method. */
951
952 static int
953 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
954 struct frame_info *this_frame,
955 void **this_prologue_cache)
956 {
957 CORE_ADDR addr_in_block;
958 gdb_byte dummy[4];
959
960 addr_in_block = get_frame_address_in_block (this_frame);
961 if (in_plt_section (addr_in_block)
962 /* We also use the stub unwinder if the target memory is unreadable
963 to avoid having the prologue unwinder trying to read it. */
964 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
965 return 1;
966
967 return 0;
968 }
969
970 /* AArch64 stub unwinder. */
971 struct frame_unwind aarch64_stub_unwind =
972 {
973 NORMAL_FRAME,
974 aarch64_stub_frame_unwind_stop_reason,
975 aarch64_stub_this_id,
976 aarch64_prologue_prev_register,
977 NULL,
978 aarch64_stub_unwind_sniffer
979 };
980
981 /* Return the frame base address of *THIS_FRAME. */
982
983 static CORE_ADDR
984 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
985 {
986 struct aarch64_prologue_cache *cache
987 = aarch64_make_prologue_cache (this_frame, this_cache);
988
989 return cache->prev_sp - cache->framesize;
990 }
991
992 /* AArch64 default frame base information. */
993 struct frame_base aarch64_normal_base =
994 {
995 &aarch64_prologue_unwind,
996 aarch64_normal_frame_base,
997 aarch64_normal_frame_base,
998 aarch64_normal_frame_base
999 };
1000
1001 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1002 dummy frame. The frame ID's base needs to match the TOS value
1003 saved by save_dummy_frame_tos () and returned from
1004 aarch64_push_dummy_call, and the PC needs to match the dummy
1005 frame's breakpoint. */
1006
1007 static struct frame_id
1008 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1009 {
1010 return frame_id_build (get_frame_register_unsigned (this_frame,
1011 AARCH64_SP_REGNUM),
1012 get_frame_pc (this_frame));
1013 }
1014
1015 /* Implement the "unwind_pc" gdbarch method. */
1016
1017 static CORE_ADDR
1018 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1019 {
1020 CORE_ADDR pc
1021 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1022
1023 return pc;
1024 }
1025
1026 /* Implement the "unwind_sp" gdbarch method. */
1027
1028 static CORE_ADDR
1029 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1030 {
1031 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1032 }
1033
1034 /* Return the value of the REGNUM register in the previous frame of
1035 *THIS_FRAME. */
1036
1037 static struct value *
1038 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1039 void **this_cache, int regnum)
1040 {
1041 CORE_ADDR lr;
1042
1043 switch (regnum)
1044 {
1045 case AARCH64_PC_REGNUM:
1046 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1047 return frame_unwind_got_constant (this_frame, regnum, lr);
1048
1049 default:
1050 internal_error (__FILE__, __LINE__,
1051 _("Unexpected register %d"), regnum);
1052 }
1053 }
1054
1055 /* Implement the "init_reg" dwarf2_frame_ops method. */
1056
1057 static void
1058 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1059 struct dwarf2_frame_state_reg *reg,
1060 struct frame_info *this_frame)
1061 {
1062 switch (regnum)
1063 {
1064 case AARCH64_PC_REGNUM:
1065 reg->how = DWARF2_FRAME_REG_FN;
1066 reg->loc.fn = aarch64_dwarf2_prev_register;
1067 break;
1068 case AARCH64_SP_REGNUM:
1069 reg->how = DWARF2_FRAME_REG_CFA;
1070 break;
1071 }
1072 }
1073
1074 /* When arguments must be pushed onto the stack, they go on in reverse
1075 order. The code below implements a FILO (stack) to do this. */
1076
1077 typedef struct
1078 {
1079 /* Value to pass on stack. It can be NULL if this item is for stack
1080 padding. */
1081 const gdb_byte *data;
1082
1083 /* Size in bytes of value to pass on stack. */
1084 int len;
1085 } stack_item_t;
1086
1087 DEF_VEC_O (stack_item_t);
1088
1089 /* Return the alignment (in bytes) of the given type. */
1090
1091 static int
1092 aarch64_type_align (struct type *t)
1093 {
1094 int n;
1095 int align;
1096 int falign;
1097
1098 t = check_typedef (t);
1099 switch (TYPE_CODE (t))
1100 {
1101 default:
1102 /* Should never happen. */
1103 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1104 return 4;
1105
1106 case TYPE_CODE_PTR:
1107 case TYPE_CODE_ENUM:
1108 case TYPE_CODE_INT:
1109 case TYPE_CODE_FLT:
1110 case TYPE_CODE_SET:
1111 case TYPE_CODE_RANGE:
1112 case TYPE_CODE_BITSTRING:
1113 case TYPE_CODE_REF:
1114 case TYPE_CODE_RVALUE_REF:
1115 case TYPE_CODE_CHAR:
1116 case TYPE_CODE_BOOL:
1117 return TYPE_LENGTH (t);
1118
1119 case TYPE_CODE_ARRAY:
1120 if (TYPE_VECTOR (t))
1121 {
1122 /* Use the natural alignment for vector types (the same as for
1123 scalar types), but cap the alignment at 128 bits. */
1124 if (TYPE_LENGTH (t) > 16)
1125 return 16;
1126 else
1127 return TYPE_LENGTH (t);
1128 }
1129 else
1130 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1131 case TYPE_CODE_COMPLEX:
1132 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1133
1134 case TYPE_CODE_STRUCT:
1135 case TYPE_CODE_UNION:
1136 align = 1;
1137 for (n = 0; n < TYPE_NFIELDS (t); n++)
1138 {
1139 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1140 if (falign > align)
1141 align = falign;
1142 }
1143 return align;
1144 }
1145 }
1146
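/* Editor's illustration (not part of the original source): a hypothetical
   composite type and the alignment the rules above assign it.  */

struct align_example { char c; double d; };	/* Hypothetical type.  */
/* The TYPE_CODE_STRUCT case walks both fields and keeps the largest
   member alignment, so the GDB type describing align_example aligns to
   8 bytes.  A 32-byte vector type, by contrast, is capped at the
   16-byte maximum.  */
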
1147 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1148
1149 Return the number of registers required, or -1 on failure.
1150
1151 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1152 to the element, else fail if the type of this element does not match the
1153 existing value. */
1154
1155 static int
1156 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1157 struct type **fundamental_type)
1158 {
1159 if (type == nullptr)
1160 return -1;
1161
1162 switch (TYPE_CODE (type))
1163 {
1164 case TYPE_CODE_FLT:
1165 if (TYPE_LENGTH (type) > 16)
1166 return -1;
1167
1168 if (*fundamental_type == nullptr)
1169 *fundamental_type = type;
1170 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1171 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1172 return -1;
1173
1174 return 1;
1175
1176 case TYPE_CODE_COMPLEX:
1177 {
1178 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1179 if (TYPE_LENGTH (target_type) > 16)
1180 return -1;
1181
1182 if (*fundamental_type == nullptr)
1183 *fundamental_type = target_type;
1184 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1185 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1186 return -1;
1187
1188 return 2;
1189 }
1190
1191 case TYPE_CODE_ARRAY:
1192 {
1193 if (TYPE_VECTOR (type))
1194 {
1195 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1196 return -1;
1197
1198 if (*fundamental_type == nullptr)
1199 *fundamental_type = type;
1200 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1201 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1202 return -1;
1203
1204 return 1;
1205 }
1206 else
1207 {
1208 struct type *target_type = TYPE_TARGET_TYPE (type);
1209 int count = aapcs_is_vfp_call_or_return_candidate_1
1210 (target_type, fundamental_type);
1211
1212 if (count == -1)
1213 return count;
1214
1215 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1216 return count;
1217 }
1218 }
1219
1220 case TYPE_CODE_STRUCT:
1221 case TYPE_CODE_UNION:
1222 {
1223 int count = 0;
1224
1225 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1226 {
1227 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1228
1229 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1230 (member, fundamental_type);
1231 if (sub_count == -1)
1232 return -1;
1233 count += sub_count;
1234 }
1235 return count;
1236 }
1237
1238 default:
1239 break;
1240 }
1241
1242 return -1;
1243 }
1244
1245 /* Return true if an argument, whose type is described by TYPE, can be passed or
1246 returned in simd/fp registers, provided enough parameter passing registers
1247 are available. This is as described in the AAPCS64.
1248
1249 Upon successful return, *COUNT returns the number of needed registers,
1250 *FUNDAMENTAL_TYPE contains the type of those registers.
1251
1252 Candidate as per the AAPCS64 5.4.2.C is either a:
1253 - float.
1254 - short-vector.
1255 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite type
1256 whose members are all floats, with at most 4 members.
1257 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite type
1258 whose members are all short vectors, with at most 4 members.
1259 - Complex (7.1.1)
1260
1261 Note that HFAs and HVAs can include nested structures and arrays. */
1262
1263 static bool
1264 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1265 struct type **fundamental_type)
1266 {
1267 if (type == nullptr)
1268 return false;
1269
1270 *fundamental_type = nullptr;
1271
1272 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1273 fundamental_type);
1274
1275 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1276 {
1277 *count = ag_count;
1278 return true;
1279 }
1280 else
1281 return false;
1282 }
1283
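/* Editor's illustration (not part of the original source): a classic HFA
   and how the walk above classifies it.  */

struct hfa_example { float a; float b; float c; };	/* Hypothetical type.  */
/* Each float member contributes 1, so *COUNT becomes 3 and
   *FUNDAMENTAL_TYPE is float; the aggregate can then be passed or
   returned in three consecutive V registers.  A mismatched member
   (say, a double) or more than HA_MAX_NUM_FLDS members disqualifies
   the type.  */
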
1284 /* AArch64 function call information structure. */
1285 struct aarch64_call_info
1286 {
1287 /* The current argument number. */
1288 unsigned argnum;
1289
1290 /* The next general purpose register number, equivalent to NGRN as
1291 described in the AArch64 Procedure Call Standard. */
1292 unsigned ngrn;
1293
1294 /* The next SIMD and floating point register number, equivalent to
1295 NSRN as described in the AArch64 Procedure Call Standard. */
1296 unsigned nsrn;
1297
1298 /* The next stacked argument address, equivalent to NSAA as
1299 described in the AArch64 Procedure Call Standard. */
1300 unsigned nsaa;
1301
1302 /* Stack item vector. */
1303 VEC(stack_item_t) *si;
1304 };
1305
1306 /* Pass a value in a sequence of consecutive X registers. The caller
1307 is responsible for ensuring sufficient registers are available. */
1308
1309 static void
1310 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1311 struct aarch64_call_info *info, struct type *type,
1312 struct value *arg)
1313 {
1314 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1315 int len = TYPE_LENGTH (type);
1316 enum type_code typecode = TYPE_CODE (type);
1317 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1318 const bfd_byte *buf = value_contents (arg);
1319
1320 info->argnum++;
1321
1322 while (len > 0)
1323 {
1324 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1325 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1326 byte_order);
1327
1328
1329 /* Adjust sub-word struct/union args when big-endian. */
1330 if (byte_order == BFD_ENDIAN_BIG
1331 && partial_len < X_REGISTER_SIZE
1332 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1333 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1334
1335 if (aarch64_debug)
1336 {
1337 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1338 gdbarch_register_name (gdbarch, regnum),
1339 phex (regval, X_REGISTER_SIZE));
1340 }
1341 regcache_cooked_write_unsigned (regcache, regnum, regval);
1342 len -= partial_len;
1343 buf += partial_len;
1344 regnum++;
1345 }
1346 }
1347
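/* Editor's illustration (not part of the original source): the
   big-endian adjustment above in concrete numbers.  A 3-byte struct
   argument lands in the least significant bytes of REGVAL, so it is
   shifted left by (X_REGISTER_SIZE - 3) * TARGET_CHAR_BIT = 40 bits to
   occupy the most significant bytes of the X register, matching the
   AAPCS64 layout for small aggregates on big-endian targets.  */
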
1348 /* Attempt to marshall a value in a V register. Return 1 if
1349 successful, or 0 if insufficient registers are available. This
1350 function, unlike the equivalent pass_in_x() function, does not
1351 handle arguments spread across multiple registers. */
1352
1353 static int
1354 pass_in_v (struct gdbarch *gdbarch,
1355 struct regcache *regcache,
1356 struct aarch64_call_info *info,
1357 int len, const bfd_byte *buf)
1358 {
1359 if (info->nsrn < 8)
1360 {
1361 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1362 /* Enough space for a full vector register. */
1363 gdb_byte reg[register_size (gdbarch, regnum)];
1364 gdb_assert (len <= sizeof (reg));
1365
1366 info->argnum++;
1367 info->nsrn++;
1368
1369 memset (reg, 0, sizeof (reg));
1370 /* PCS C.1, the argument is allocated to the least significant
1371 bits of V register. */
1372 memcpy (reg, buf, len);
1373 regcache->cooked_write (regnum, reg);
1374
1375 if (aarch64_debug)
1376 {
1377 debug_printf ("arg %d in %s\n", info->argnum,
1378 gdbarch_register_name (gdbarch, regnum));
1379 }
1380 return 1;
1381 }
1382 info->nsrn = 8;
1383 return 0;
1384 }
1385
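/* Editor's illustration (not part of the original source): a hypothetical
   use of pass_in_v for a 4-byte float whose raw bytes sit in FLOAT_BUF.
   The register image is zero-filled first, so per PCS C.1 the value
   occupies only the least significant 4 bytes of the V register:

     pass_in_v (gdbarch, regcache, &info, 4, float_buf);

   Afterwards info.nsrn has advanced by one, or the call returned 0
   because all eight V argument registers were already in use.  */
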
1386 /* Marshall an argument onto the stack. */
1387
1388 static void
1389 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1390 struct value *arg)
1391 {
1392 const bfd_byte *buf = value_contents (arg);
1393 int len = TYPE_LENGTH (type);
1394 int align;
1395 stack_item_t item;
1396
1397 info->argnum++;
1398
1399 align = aarch64_type_align (type);
1400
1401 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1402 natural alignment of the argument's type. */
1403 align = align_up (align, 8);
1404
1405 /* The AArch64 PCS requires at most doubleword alignment. */
1406 if (align > 16)
1407 align = 16;
1408
1409 if (aarch64_debug)
1410 {
1411 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1412 info->nsaa);
1413 }
1414
1415 item.len = len;
1416 item.data = buf;
1417 VEC_safe_push (stack_item_t, info->si, &item);
1418
1419 info->nsaa += len;
1420 if (info->nsaa & (align - 1))
1421 {
1422 /* Push stack alignment padding. */
1423 int pad = align - (info->nsaa & (align - 1));
1424
1425 item.len = pad;
1426 item.data = NULL;
1427
1428 VEC_safe_push (stack_item_t, info->si, &item);
1429 info->nsaa += pad;
1430 }
1431 }
1432
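/* Editor's illustration (not part of the original source): the padding
   arithmetic above with concrete numbers.  For a 12-byte argument whose
   type needs 16-byte alignment, NSAA becomes 12 after the data item;
   12 & (16 - 1) is nonzero, so a 4-byte padding item (data == NULL) is
   pushed and NSAA ends at 16, keeping the next slot aligned.  */
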
1433 /* Marshall an argument into a sequence of one or more consecutive X
1434 registers or, if insufficient X registers are available then onto
1435 the stack. */
1436
1437 static void
1438 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1439 struct aarch64_call_info *info, struct type *type,
1440 struct value *arg)
1441 {
1442 int len = TYPE_LENGTH (type);
1443 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1444
1445 /* PCS C.13 - Pass in registers if we have enough spare. */
1446 if (info->ngrn + nregs <= 8)
1447 {
1448 pass_in_x (gdbarch, regcache, info, type, arg);
1449 info->ngrn += nregs;
1450 }
1451 else
1452 {
1453 info->ngrn = 8;
1454 pass_on_stack (info, type, arg);
1455 }
1456 }
1457
1458 /* Pass a value of type ARG_TYPE in a V register. Assumes the value is an
1459 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1460 registers. A return value of false is an error state as the value will have
1461 been partially passed to the stack. */
1462 static bool
1463 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1464 struct aarch64_call_info *info, struct type *arg_type,
1465 struct value *arg)
1466 {
1467 switch (TYPE_CODE (arg_type))
1468 {
1469 case TYPE_CODE_FLT:
1470 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1471 value_contents (arg));
1472 break;
1473
1474 case TYPE_CODE_COMPLEX:
1475 {
1476 const bfd_byte *buf = value_contents (arg);
1477 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1478
1479 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1480 buf))
1481 return false;
1482
1483 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1484 buf + TYPE_LENGTH (target_type));
1485 }
1486
1487 case TYPE_CODE_ARRAY:
1488 if (TYPE_VECTOR (arg_type))
1489 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1490 value_contents (arg));
1491 /* fall through. */
1492
1493 case TYPE_CODE_STRUCT:
1494 case TYPE_CODE_UNION:
1495 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1496 {
1497 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1498 struct type *field_type = check_typedef (value_type (field));
1499
1500 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1501 field))
1502 return false;
1503 }
1504 return true;
1505
1506 default:
1507 return false;
1508 }
1509 }
1510
1511 /* Implement the "push_dummy_call" gdbarch method. */
1512
1513 static CORE_ADDR
1514 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1515 struct regcache *regcache, CORE_ADDR bp_addr,
1516 int nargs,
1517 struct value **args, CORE_ADDR sp,
1518 function_call_return_method return_method,
1519 CORE_ADDR struct_addr)
1520 {
1521 int argnum;
1522 struct aarch64_call_info info;
1523 struct type *func_type;
1524 struct type *return_type;
1525 int lang_struct_return;
1526
1527 memset (&info, 0, sizeof (info));
1528
1529 /* We need to know what the type of the called function is in order
1530 to determine the number of named/anonymous arguments for the
1531 actual argument placement, and the return type in order to handle
1532 return value correctly.
1533
1534 The generic code above us views the decision of return in memory
1535 or return in registers as a two-stage process. The language
1536 handler is consulted first and may decide to return in memory (e.g.
1537 a class with a copy constructor returned by value); this will cause
1538 the generic code to allocate space AND insert an initial leading
1539 argument.
1540
1541 If the language code does not decide to pass in memory then the
1542 target code is consulted.
1543
1544 If the language code decides to pass in memory we want to move
1545 the pointer inserted as the initial argument from the argument
1546 list and into X8, the conventional AArch64 struct return pointer
1547 register.
1548
1549 This is slightly awkward, ideally the flag "lang_struct_return"
1550 would be passed to the targets implementation of push_dummy_call.
1551 Rather than change the target interface we call the language code
1552 directly ourselves. */
1553
1554 func_type = check_typedef (value_type (function));
1555
1556 /* Dereference function pointer types. */
1557 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1558 func_type = TYPE_TARGET_TYPE (func_type);
1559
1560 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1561 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1562
1563 /* If language_pass_by_reference () returned true we will have been
1564 given an additional initial argument, a hidden pointer to the
1565 return slot in memory. */
1566 return_type = TYPE_TARGET_TYPE (func_type);
1567 lang_struct_return = language_pass_by_reference (return_type);
1568
1569 /* Set the return address. For the AArch64, the return breakpoint
1570 is always at BP_ADDR. */
1571 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1572
1573 /* If we were given an initial argument for the return slot because
1574 lang_struct_return was true, lose it. */
1575 if (lang_struct_return)
1576 {
1577 args++;
1578 nargs--;
1579 }
1580
1581 /* The struct_return pointer occupies X8. */
1582 if (return_method == return_method_struct || lang_struct_return)
1583 {
1584 if (aarch64_debug)
1585 {
1586 debug_printf ("struct return in %s = 0x%s\n",
1587 gdbarch_register_name (gdbarch,
1588 AARCH64_STRUCT_RETURN_REGNUM),
1589 paddress (gdbarch, struct_addr));
1590 }
1591 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1592 struct_addr);
1593 }
1594
1595 for (argnum = 0; argnum < nargs; argnum++)
1596 {
1597 struct value *arg = args[argnum];
1598 struct type *arg_type, *fundamental_type;
1599 int len, elements;
1600
1601 arg_type = check_typedef (value_type (arg));
1602 len = TYPE_LENGTH (arg_type);
1603
1604 /* If arg can be passed in V registers as per the AAPCS64, then do so
1605 if there are enough spare registers. */
1606 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1607 &fundamental_type))
1608 {
1609 if (info.nsrn + elements <= 8)
1610 {
1611 /* We know that we have sufficient registers available, therefore
1612 this will never need to fall back to the stack. */
1613 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1614 arg))
1615 gdb_assert_not_reached ("Failed to push args");
1616 }
1617 else
1618 {
1619 info.nsrn = 8;
1620 pass_on_stack (&info, arg_type, arg);
1621 }
1622 continue;
1623 }
1624
1625 switch (TYPE_CODE (arg_type))
1626 {
1627 case TYPE_CODE_INT:
1628 case TYPE_CODE_BOOL:
1629 case TYPE_CODE_CHAR:
1630 case TYPE_CODE_RANGE:
1631 case TYPE_CODE_ENUM:
1632 if (len < 4)
1633 {
1634 /* Promote to 32 bit integer. */
1635 if (TYPE_UNSIGNED (arg_type))
1636 arg_type = builtin_type (gdbarch)->builtin_uint32;
1637 else
1638 arg_type = builtin_type (gdbarch)->builtin_int32;
1639 arg = value_cast (arg_type, arg);
1640 }
1641 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1642 break;
1643
1644 case TYPE_CODE_STRUCT:
1645 case TYPE_CODE_ARRAY:
1646 case TYPE_CODE_UNION:
1647 if (len > 16)
1648 {
1649 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1650 invisible reference. */
1651
1652 /* Allocate aligned storage. */
1653 sp = align_down (sp - len, 16);
1654
1655 /* Write the real data into the stack. */
1656 write_memory (sp, value_contents (arg), len);
1657
1658 /* Construct the indirection. */
1659 arg_type = lookup_pointer_type (arg_type);
1660 arg = value_from_pointer (arg_type, sp);
1661 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1662 }
1663 else
1664 /* PCS C.15 / C.18 multiple values pass. */
1665 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1666 break;
1667
1668 default:
1669 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1670 break;
1671 }
1672 }
1673
1674 /* Make sure the stack retains 16-byte alignment. */
1675 if (info.nsaa & 15)
1676 sp -= 16 - (info.nsaa & 15);
1677
1678 while (!VEC_empty (stack_item_t, info.si))
1679 {
1680 stack_item_t *si = VEC_last (stack_item_t, info.si);
1681
1682 sp -= si->len;
1683 if (si->data != NULL)
1684 write_memory (sp, si->data, si->len);
1685 VEC_pop (stack_item_t, info.si);
1686 }
1687
1688 VEC_free (stack_item_t, info.si);
1689
1690 /* Finally, update the SP register. */
1691 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1692
1693 return sp;
1694 }
1695
1696 /* Implement the "frame_align" gdbarch method. */
1697
1698 static CORE_ADDR
1699 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1700 {
1701 /* Align the stack to sixteen bytes. */
1702 return sp & ~(CORE_ADDR) 15;
1703 }
1704
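/* Editor's illustration (not part of the original source): frame_align
   rounds downwards, so the aligned SP never creeps above in-use stack,
   e.g. aarch64_frame_align (gdbarch, 0x7ffffffff3c) == 0x7ffffffff30.  */
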
1705 /* Return the type for an AdvSIMD Q register. */
1706
1707 static struct type *
1708 aarch64_vnq_type (struct gdbarch *gdbarch)
1709 {
1710 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1711
1712 if (tdep->vnq_type == NULL)
1713 {
1714 struct type *t;
1715 struct type *elem;
1716
1717 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1718 TYPE_CODE_UNION);
1719
1720 elem = builtin_type (gdbarch)->builtin_uint128;
1721 append_composite_type_field (t, "u", elem);
1722
1723 elem = builtin_type (gdbarch)->builtin_int128;
1724 append_composite_type_field (t, "s", elem);
1725
1726 tdep->vnq_type = t;
1727 }
1728
1729 return tdep->vnq_type;
1730 }
1731
1732 /* Return the type for an AdvSIMD D register. */
1733
1734 static struct type *
1735 aarch64_vnd_type (struct gdbarch *gdbarch)
1736 {
1737 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1738
1739 if (tdep->vnd_type == NULL)
1740 {
1741 struct type *t;
1742 struct type *elem;
1743
1744 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1745 TYPE_CODE_UNION);
1746
1747 elem = builtin_type (gdbarch)->builtin_double;
1748 append_composite_type_field (t, "f", elem);
1749
1750 elem = builtin_type (gdbarch)->builtin_uint64;
1751 append_composite_type_field (t, "u", elem);
1752
1753 elem = builtin_type (gdbarch)->builtin_int64;
1754 append_composite_type_field (t, "s", elem);
1755
1756 tdep->vnd_type = t;
1757 }
1758
1759 return tdep->vnd_type;
1760 }
1761
1762 /* Return the type for an AdvSIMD S register. */
1763
1764 static struct type *
1765 aarch64_vns_type (struct gdbarch *gdbarch)
1766 {
1767 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1768
1769 if (tdep->vns_type == NULL)
1770 {
1771 struct type *t;
1772 struct type *elem;
1773
1774 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1775 TYPE_CODE_UNION);
1776
1777 elem = builtin_type (gdbarch)->builtin_float;
1778 append_composite_type_field (t, "f", elem);
1779
1780 elem = builtin_type (gdbarch)->builtin_uint32;
1781 append_composite_type_field (t, "u", elem);
1782
1783 elem = builtin_type (gdbarch)->builtin_int32;
1784 append_composite_type_field (t, "s", elem);
1785
1786 tdep->vns_type = t;
1787 }
1788
1789 return tdep->vns_type;
1790 }
1791
1792 /* Return the type for an AdvSIMD H register. */
1793
1794 static struct type *
1795 aarch64_vnh_type (struct gdbarch *gdbarch)
1796 {
1797 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1798
1799 if (tdep->vnh_type == NULL)
1800 {
1801 struct type *t;
1802 struct type *elem;
1803
1804 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1805 TYPE_CODE_UNION);
1806
1807 elem = builtin_type (gdbarch)->builtin_uint16;
1808 append_composite_type_field (t, "u", elem);
1809
1810 elem = builtin_type (gdbarch)->builtin_int16;
1811 append_composite_type_field (t, "s", elem);
1812
1813 tdep->vnh_type = t;
1814 }
1815
1816 return tdep->vnh_type;
1817 }
1818
1819 /* Return the type for an AdvSIMD B register. */
1820
1821 static struct type *
1822 aarch64_vnb_type (struct gdbarch *gdbarch)
1823 {
1824 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1825
1826 if (tdep->vnb_type == NULL)
1827 {
1828 struct type *t;
1829 struct type *elem;
1830
1831 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1832 TYPE_CODE_UNION);
1833
1834 elem = builtin_type (gdbarch)->builtin_uint8;
1835 append_composite_type_field (t, "u", elem);
1836
1837 elem = builtin_type (gdbarch)->builtin_int8;
1838 append_composite_type_field (t, "s", elem);
1839
1840 tdep->vnb_type = t;
1841 }
1842
1843 return tdep->vnb_type;
1844 }
1845
1846 /* Return the type for an AdvSIMD V register. */
1847
1848 static struct type *
1849 aarch64_vnv_type (struct gdbarch *gdbarch)
1850 {
1851 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1852
1853 if (tdep->vnv_type == NULL)
1854 {
1855 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1856 TYPE_CODE_UNION);
1857
1858 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1859 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1860 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1861 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1862 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1863
1864 tdep->vnv_type = t;
1865 }
1866
1867 return tdep->vnv_type;
1868 }
1869
1870 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1871
1872 static int
1873 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1874 {
1875 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1876 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1877
1878 if (reg == AARCH64_DWARF_SP)
1879 return AARCH64_SP_REGNUM;
1880
1881 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1882 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1883
1884 if (reg == AARCH64_DWARF_SVE_VG)
1885 return AARCH64_SVE_VG_REGNUM;
1886
1887 if (reg == AARCH64_DWARF_SVE_FFR)
1888 return AARCH64_SVE_FFR_REGNUM;
1889
1890 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1891 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1892
1893 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1894 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1895
1896 return -1;
1897 }
1898
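/* Editor's illustration (not part of the original source): the AArch64
   DWARF numbering used above is x0-x30 = 0-30, sp = 31, v0-v31 = 64-95,
   so aarch64_dwarf_reg_to_regnum (gdbarch, AARCH64_DWARF_V0 + 2)
   returns AARCH64_V0_REGNUM + 2 (v2), and any unmapped number
   returns -1.  */
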
1899 /* Implement the "print_insn" gdbarch method. */
1900
1901 static int
1902 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1903 {
1904 info->symbols = NULL;
1905 return default_print_insn (memaddr, info);
1906 }
1907
1908 /* AArch64 BRK software debug mode instruction.
1909 Note that AArch64 code is always little-endian.
1910 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1911 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1912
1913 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1914
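/* Editor's illustration (not part of the original source): reading the
   bytes {0x00, 0x00, 0x20, 0xd4} as a little-endian 32-bit word gives
   0xd4200000, the "brk #0" pattern quoted above.  The byte order is
   fixed even on big-endian targets, since AArch64 instruction fetch is
   always little-endian.  */
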
1915 /* Extract from an array REGS containing the (raw) register state a
1916 function return value of type TYPE, and copy that, in virtual
1917 format, into VALBUF. */
1918
1919 static void
1920 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1921 gdb_byte *valbuf)
1922 {
1923 struct gdbarch *gdbarch = regs->arch ();
1924 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1925 int elements;
1926 struct type *fundamental_type;
1927
1928 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1929 &fundamental_type))
1930 {
1931 int len = TYPE_LENGTH (fundamental_type);
1932
1933 for (int i = 0; i < elements; i++)
1934 {
1935 int regno = AARCH64_V0_REGNUM + i;
1936 /* Enough space for a full vector register. */
1937 gdb_byte buf[register_size (gdbarch, regno)];
1938 gdb_assert (len <= sizeof (buf));
1939
1940 if (aarch64_debug)
1941 {
1942 debug_printf ("read HFA or HVA return value element %d from %s\n",
1943 i + 1,
1944 gdbarch_register_name (gdbarch, regno));
1945 }
1946 regs->cooked_read (regno, buf);
1947
1948 memcpy (valbuf, buf, len);
1949 valbuf += len;
1950 }
1951 }
1952 else if (TYPE_CODE (type) == TYPE_CODE_INT
1953 || TYPE_CODE (type) == TYPE_CODE_CHAR
1954 || TYPE_CODE (type) == TYPE_CODE_BOOL
1955 || TYPE_CODE (type) == TYPE_CODE_PTR
1956 || TYPE_IS_REFERENCE (type)
1957 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1958 {
1959       /* If the type is a plain integer, then the access is
1960 	 straightforward.  Otherwise we have to play around a bit
1961 	 more.  */
1962 int len = TYPE_LENGTH (type);
1963 int regno = AARCH64_X0_REGNUM;
1964 ULONGEST tmp;
1965
1966 while (len > 0)
1967 {
1968 /* By using store_unsigned_integer we avoid having to do
1969 anything special for small big-endian values. */
1970 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1971 store_unsigned_integer (valbuf,
1972 (len > X_REGISTER_SIZE
1973 ? X_REGISTER_SIZE : len), byte_order, tmp);
1974 len -= X_REGISTER_SIZE;
1975 valbuf += X_REGISTER_SIZE;
1976 }
1977 }
1978 else
1979 {
1980 /* For a structure or union the behaviour is as if the value had
1981 been stored to word-aligned memory and then loaded into
1982 registers with 64-bit load instruction(s). */
1983 int len = TYPE_LENGTH (type);
1984 int regno = AARCH64_X0_REGNUM;
1985 bfd_byte buf[X_REGISTER_SIZE];
1986
1987 while (len > 0)
1988 {
1989 regs->cooked_read (regno++, buf);
1990 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1991 len -= X_REGISTER_SIZE;
1992 valbuf += X_REGISTER_SIZE;
1993 }
1994 }
1995 }
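
/* As a worked example of the struct path above (illustrative only): a
   12-byte struct returned in registers arrives with bytes 0-7 in X0 and
   bytes 8-11 in the low half of X1, so the loop copies eight bytes from
   X0 and the remaining four bytes from X1 into VALBUF.  */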
1996
1997
1998 /* Will a function return an aggregate type in memory or in a
1999 register? Return 0 if an aggregate type can be returned in a
2000 register, 1 if it must be returned in memory. */
2001
2002 static int
2003 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2004 {
2005 type = check_typedef (type);
2006 int elements;
2007 struct type *fundamental_type;
2008
2009 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2010 &fundamental_type))
2011 {
2012       /* v0-v7 are used to return values, with one register allocated
2013 	 per member; an HFA or HVA has at most four members.  */
2014 return 0;
2015 }
2016
2017 if (TYPE_LENGTH (type) > 16)
2018 {
2019 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2020 invisible reference. */
2021
2022 return 1;
2023 }
2024
2025 return 0;
2026 }
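
/* Two illustrative cases for the checks above: a struct of four floats
   is an HFA, so it is returned in V0-V3 and we return 0; a 24-byte
   struct exceeds the 16-byte limit, so it is returned in memory via an
   invisible reference (passed in X8 under the AAPCS64) and we return 1.  */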
2027
2028 /* Write into appropriate registers a function return value of type
2029 TYPE, given in virtual format. */
2030
2031 static void
2032 aarch64_store_return_value (struct type *type, struct regcache *regs,
2033 const gdb_byte *valbuf)
2034 {
2035 struct gdbarch *gdbarch = regs->arch ();
2036 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2037 int elements;
2038 struct type *fundamental_type;
2039
2040 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2041 &fundamental_type))
2042 {
2043 int len = TYPE_LENGTH (fundamental_type);
2044
2045 for (int i = 0; i < elements; i++)
2046 {
2047 int regno = AARCH64_V0_REGNUM + i;
2048 /* Enough space for a full vector register. */
2049 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2050 gdb_assert (len <= sizeof (tmpbuf));
2051
2052 if (aarch64_debug)
2053 {
2054 debug_printf ("write HFA or HVA return value element %d to %s\n",
2055 i + 1,
2056 gdbarch_register_name (gdbarch, regno));
2057 }
2058
2059 memcpy (tmpbuf, valbuf,
2060 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2061 regs->cooked_write (regno, tmpbuf);
2062 valbuf += len;
2063 }
2064 }
2065 else if (TYPE_CODE (type) == TYPE_CODE_INT
2066 || TYPE_CODE (type) == TYPE_CODE_CHAR
2067 || TYPE_CODE (type) == TYPE_CODE_BOOL
2068 || TYPE_CODE (type) == TYPE_CODE_PTR
2069 || TYPE_IS_REFERENCE (type)
2070 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2071 {
2072 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2073 {
2074 /* Values of one word or less are zero/sign-extended and
2075 	     returned in X0.  */
2076 bfd_byte tmpbuf[X_REGISTER_SIZE];
2077 LONGEST val = unpack_long (type, valbuf);
2078
2079 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2080 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2081 }
2082 else
2083 {
2084 /* Integral values greater than one word are stored in
2085 	     consecutive registers starting with X0.  The length will always
2086 	     be a multiple of the register size.  */
2087 int len = TYPE_LENGTH (type);
2088 int regno = AARCH64_X0_REGNUM;
2089
2090 while (len > 0)
2091 {
2092 regs->cooked_write (regno++, valbuf);
2093 len -= X_REGISTER_SIZE;
2094 valbuf += X_REGISTER_SIZE;
2095 }
2096 }
2097 }
2098 else
2099 {
2100 /* For a structure or union the behaviour is as if the value had
2101 been stored to word-aligned memory and then loaded into
2102 registers with 64-bit load instruction(s). */
2103 int len = TYPE_LENGTH (type);
2104 int regno = AARCH64_X0_REGNUM;
2105 bfd_byte tmpbuf[X_REGISTER_SIZE];
2106
2107 while (len > 0)
2108 {
2109 memcpy (tmpbuf, valbuf,
2110 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2111 regs->cooked_write (regno++, tmpbuf);
2112 len -= X_REGISTER_SIZE;
2113 valbuf += X_REGISTER_SIZE;
2114 }
2115 }
2116 }
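
/* A worked example of the small-integer path above: storing the value -1
   for a 2-byte signed type unpacks to LONGEST -1, and store_signed_integer
   then writes the sign-extended pattern 0xffffffffffffffff into X0.  */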
2117
2118 /* Implement the "return_value" gdbarch method. */
2119
2120 static enum return_value_convention
2121 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2122 struct type *valtype, struct regcache *regcache,
2123 gdb_byte *readbuf, const gdb_byte *writebuf)
2124 {
2126 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2127 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2128 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2129 {
2130 if (aarch64_return_in_memory (gdbarch, valtype))
2131 {
2132 if (aarch64_debug)
2133 debug_printf ("return value in memory\n");
2134 return RETURN_VALUE_STRUCT_CONVENTION;
2135 }
2136 }
2137
2138 if (writebuf)
2139 aarch64_store_return_value (valtype, regcache, writebuf);
2140
2141 if (readbuf)
2142 aarch64_extract_return_value (valtype, regcache, readbuf);
2143
2144 if (aarch64_debug)
2145 debug_printf ("return value in registers\n");
2146
2147 return RETURN_VALUE_REGISTER_CONVENTION;
2148 }
2149
2150 /* Implement the "get_longjmp_target" gdbarch method. */
2151
2152 static int
2153 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2154 {
2155 CORE_ADDR jb_addr;
2156 gdb_byte buf[X_REGISTER_SIZE];
2157 struct gdbarch *gdbarch = get_frame_arch (frame);
2158 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2159 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2160
2161 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2162
2163 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2164 X_REGISTER_SIZE))
2165 return 0;
2166
2167 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2168 return 1;
2169 }
2170
2171 /* Implement the "gen_return_address" gdbarch method. */
2172
2173 static void
2174 aarch64_gen_return_address (struct gdbarch *gdbarch,
2175 struct agent_expr *ax, struct axs_value *value,
2176 CORE_ADDR scope)
2177 {
2178 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2179 value->kind = axs_lvalue_register;
2180 value->u.reg = AARCH64_LR_REGNUM;
2181 }
2182 \f
2183
2184 /* Return the pseudo register name corresponding to register regnum. */
2185
2186 static const char *
2187 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2188 {
2189 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2190
2191 static const char *const q_name[] =
2192 {
2193 "q0", "q1", "q2", "q3",
2194 "q4", "q5", "q6", "q7",
2195 "q8", "q9", "q10", "q11",
2196 "q12", "q13", "q14", "q15",
2197 "q16", "q17", "q18", "q19",
2198 "q20", "q21", "q22", "q23",
2199 "q24", "q25", "q26", "q27",
2200 "q28", "q29", "q30", "q31",
2201 };
2202
2203 static const char *const d_name[] =
2204 {
2205 "d0", "d1", "d2", "d3",
2206 "d4", "d5", "d6", "d7",
2207 "d8", "d9", "d10", "d11",
2208 "d12", "d13", "d14", "d15",
2209 "d16", "d17", "d18", "d19",
2210 "d20", "d21", "d22", "d23",
2211 "d24", "d25", "d26", "d27",
2212 "d28", "d29", "d30", "d31",
2213 };
2214
2215 static const char *const s_name[] =
2216 {
2217 "s0", "s1", "s2", "s3",
2218 "s4", "s5", "s6", "s7",
2219 "s8", "s9", "s10", "s11",
2220 "s12", "s13", "s14", "s15",
2221 "s16", "s17", "s18", "s19",
2222 "s20", "s21", "s22", "s23",
2223 "s24", "s25", "s26", "s27",
2224 "s28", "s29", "s30", "s31",
2225 };
2226
2227 static const char *const h_name[] =
2228 {
2229 "h0", "h1", "h2", "h3",
2230 "h4", "h5", "h6", "h7",
2231 "h8", "h9", "h10", "h11",
2232 "h12", "h13", "h14", "h15",
2233 "h16", "h17", "h18", "h19",
2234 "h20", "h21", "h22", "h23",
2235 "h24", "h25", "h26", "h27",
2236 "h28", "h29", "h30", "h31",
2237 };
2238
2239 static const char *const b_name[] =
2240 {
2241 "b0", "b1", "b2", "b3",
2242 "b4", "b5", "b6", "b7",
2243 "b8", "b9", "b10", "b11",
2244 "b12", "b13", "b14", "b15",
2245 "b16", "b17", "b18", "b19",
2246 "b20", "b21", "b22", "b23",
2247 "b24", "b25", "b26", "b27",
2248 "b28", "b29", "b30", "b31",
2249 };
2250
2251 regnum -= gdbarch_num_regs (gdbarch);
2252
2253 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2254 return q_name[regnum - AARCH64_Q0_REGNUM];
2255
2256 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2257 return d_name[regnum - AARCH64_D0_REGNUM];
2258
2259 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2260 return s_name[regnum - AARCH64_S0_REGNUM];
2261
2262 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2263 return h_name[regnum - AARCH64_H0_REGNUM];
2264
2265 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2266 return b_name[regnum - AARCH64_B0_REGNUM];
2267
2268 if (tdep->has_sve ())
2269 {
2270 static const char *const sve_v_name[] =
2271 {
2272 "v0", "v1", "v2", "v3",
2273 "v4", "v5", "v6", "v7",
2274 "v8", "v9", "v10", "v11",
2275 "v12", "v13", "v14", "v15",
2276 "v16", "v17", "v18", "v19",
2277 "v20", "v21", "v22", "v23",
2278 "v24", "v25", "v26", "v27",
2279 "v28", "v29", "v30", "v31",
2280 };
2281
2282 if (regnum >= AARCH64_SVE_V0_REGNUM
2283 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2284 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2285 }
2286
2287 internal_error (__FILE__, __LINE__,
2288 _("aarch64_pseudo_register_name: bad register number %d"),
2289 regnum);
2290 }
2291
2292 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2293
2294 static struct type *
2295 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2296 {
2297 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2298
2299 regnum -= gdbarch_num_regs (gdbarch);
2300
2301 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2302 return aarch64_vnq_type (gdbarch);
2303
2304 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2305 return aarch64_vnd_type (gdbarch);
2306
2307 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2308 return aarch64_vns_type (gdbarch);
2309
2310 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2311 return aarch64_vnh_type (gdbarch);
2312
2313 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2314 return aarch64_vnb_type (gdbarch);
2315
2316 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2317 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2318 return aarch64_vnv_type (gdbarch);
2319
2320 internal_error (__FILE__, __LINE__,
2321 _("aarch64_pseudo_register_type: bad register number %d"),
2322 regnum);
2323 }
2324
2325 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2326
2327 static int
2328 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2329 struct reggroup *group)
2330 {
2331 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2332
2333 regnum -= gdbarch_num_regs (gdbarch);
2334
2335 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2336 return group == all_reggroup || group == vector_reggroup;
2337 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2338 return (group == all_reggroup || group == vector_reggroup
2339 || group == float_reggroup);
2340 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2341 return (group == all_reggroup || group == vector_reggroup
2342 || group == float_reggroup);
2343 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2344 return group == all_reggroup || group == vector_reggroup;
2345 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2346 return group == all_reggroup || group == vector_reggroup;
2347 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2348 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2349 return group == all_reggroup || group == vector_reggroup;
2350
2351 return group == all_reggroup;
2352 }
2353
2354 /* Helper for aarch64_pseudo_read_value. */
2355
2356 static struct value *
2357 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2358 readable_regcache *regcache, int regnum_offset,
2359 int regsize, struct value *result_value)
2360 {
2361 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2362
2363 /* Enough space for a full vector register. */
2364 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2365 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2366
2367 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2368 mark_value_bytes_unavailable (result_value, 0,
2369 TYPE_LENGTH (value_type (result_value)));
2370 else
2371 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2372
2373 return result_value;
2374 }
2375
2376 /* Implement the "pseudo_register_read_value" gdbarch method. */
2377
2378 static struct value *
2379 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2380 int regnum)
2381 {
2382 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2383 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2384
2385 VALUE_LVAL (result_value) = lval_register;
2386 VALUE_REGNUM (result_value) = regnum;
2387
2388 regnum -= gdbarch_num_regs (gdbarch);
2389
2390 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2391 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2392 regnum - AARCH64_Q0_REGNUM,
2393 Q_REGISTER_SIZE, result_value);
2394
2395 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2396 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2397 regnum - AARCH64_D0_REGNUM,
2398 D_REGISTER_SIZE, result_value);
2399
2400 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2401 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2402 regnum - AARCH64_S0_REGNUM,
2403 S_REGISTER_SIZE, result_value);
2404
2405 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2406 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2407 regnum - AARCH64_H0_REGNUM,
2408 H_REGISTER_SIZE, result_value);
2409
2410 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2411 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2412 regnum - AARCH64_B0_REGNUM,
2413 B_REGISTER_SIZE, result_value);
2414
2415 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2416 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2417 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2418 regnum - AARCH64_SVE_V0_REGNUM,
2419 V_REGISTER_SIZE, result_value);
2420
2421   gdb_assert_not_reached ("regnum out of bounds");
2422 }
2423
2424 /* Helper for aarch64_pseudo_write. */
2425
2426 static void
2427 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2428 int regnum_offset, int regsize, const gdb_byte *buf)
2429 {
2430 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2431
2432 /* Enough space for a full vector register. */
2433 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2434 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2435
2436   /* Ensure the register buffer is zero.  We want GDB writes of the
2437      various 'scalar' pseudo registers to behave like architectural
2438      writes: register-width bytes are written and the remainder is
2439      set to zero.  */
2440 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2441
2442 memcpy (reg_buf, buf, regsize);
2443 regcache->raw_write (v_regnum, reg_buf);
2444 }
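
/* For example (assuming 16-byte V registers): writing 4 bytes to the
   pseudo register S0 places them at the start of V0 and clears the
   remaining 12 bytes, just as an architectural write to S0 would.  */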
2445
2446 /* Implement the "pseudo_register_write" gdbarch method. */
2447
2448 static void
2449 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2450 int regnum, const gdb_byte *buf)
2451 {
2452 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2453 regnum -= gdbarch_num_regs (gdbarch);
2454
2455 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2456 return aarch64_pseudo_write_1 (gdbarch, regcache,
2457 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2458 buf);
2459
2460 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2461 return aarch64_pseudo_write_1 (gdbarch, regcache,
2462 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2463 buf);
2464
2465 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2466 return aarch64_pseudo_write_1 (gdbarch, regcache,
2467 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2468 buf);
2469
2470 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2471 return aarch64_pseudo_write_1 (gdbarch, regcache,
2472 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2473 buf);
2474
2475 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2476 return aarch64_pseudo_write_1 (gdbarch, regcache,
2477 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2478 buf);
2479
2480 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2481 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2482 return aarch64_pseudo_write_1 (gdbarch, regcache,
2483 regnum - AARCH64_SVE_V0_REGNUM,
2484 V_REGISTER_SIZE, buf);
2485
2486   gdb_assert_not_reached ("regnum out of bounds");
2487 }
2488
2489 /* Callback function for user_reg_add. */
2490
2491 static struct value *
2492 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2493 {
2494 const int *reg_p = (const int *) baton;
2495
2496 return value_of_register (*reg_p, frame);
2497 }
2498 \f
2499
2500 /* Implement the "software_single_step" gdbarch method, needed to
2501 single step through atomic sequences on AArch64. */
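
/* A typical sequence to be stepped over looks like this (illustrative
   only; registers and labels are arbitrary):

     loop:  ldaxr  w2, [x0]       ; load exclusive opens the sequence
            cmp    w2, w1
            b.ne   out            ; conditional branch out of the loop
            stlxr  w3, w4, [x0]   ; store exclusive closes the sequence
            cbnz   w3, loop
     out:

   Stepping through the sequence with breakpoints would clear the
   exclusive monitor and make the store exclusive fail forever, so we
   instead breakpoint the instruction following the store exclusive and,
   if one was seen, the target of the conditional branch.  */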
2502
2503 static std::vector<CORE_ADDR>
2504 aarch64_software_single_step (struct regcache *regcache)
2505 {
2506 struct gdbarch *gdbarch = regcache->arch ();
2507 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2508 const int insn_size = 4;
2509 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2510 CORE_ADDR pc = regcache_read_pc (regcache);
2511 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2512 CORE_ADDR loc = pc;
2513 CORE_ADDR closing_insn = 0;
2514 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2515 byte_order_for_code);
2516 int index;
2517 int insn_count;
2518 int bc_insn_count = 0; /* Conditional branch instruction count. */
2519 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2520 aarch64_inst inst;
2521
2522 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2523 return {};
2524
2525 /* Look for a Load Exclusive instruction which begins the sequence. */
2526 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2527 return {};
2528
2529 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2530 {
2531 loc += insn_size;
2532 insn = read_memory_unsigned_integer (loc, insn_size,
2533 byte_order_for_code);
2534
2535 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2536 return {};
2537 /* Check if the instruction is a conditional branch. */
2538 if (inst.opcode->iclass == condbranch)
2539 {
2540 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2541
2542 if (bc_insn_count >= 1)
2543 return {};
2544
2545 /* It is, so we'll try to set a breakpoint at the destination. */
2546 breaks[1] = loc + inst.operands[0].imm.value;
2547
2548 bc_insn_count++;
2549 last_breakpoint++;
2550 }
2551
2552 /* Look for the Store Exclusive which closes the atomic sequence. */
2553 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2554 {
2555 closing_insn = loc;
2556 break;
2557 }
2558 }
2559
2560 /* We didn't find a closing Store Exclusive instruction, fall back. */
2561 if (!closing_insn)
2562 return {};
2563
2564 /* Insert breakpoint after the end of the atomic sequence. */
2565 breaks[0] = loc + insn_size;
2566
2567 /* Check for duplicated breakpoints, and also check that the second
2568 breakpoint is not within the atomic sequence. */
2569 if (last_breakpoint
2570 && (breaks[1] == breaks[0]
2571 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2572 last_breakpoint = 0;
2573
2574 std::vector<CORE_ADDR> next_pcs;
2575
2576 /* Insert the breakpoint at the end of the sequence, and one at the
2577 destination of the conditional branch, if it exists. */
2578 for (index = 0; index <= last_breakpoint; index++)
2579 next_pcs.push_back (breaks[index]);
2580
2581 return next_pcs;
2582 }
2583
2584 struct aarch64_displaced_step_closure : public displaced_step_closure
2585 {
2586   /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2587      is being displaced stepped.  */
2588 int cond = 0;
2589
2590 /* PC adjustment offset after displaced stepping. */
2591 int32_t pc_adjust = 0;
2592 };
2593
2594 /* Data when visiting instructions for displaced stepping. */
2595
2596 struct aarch64_displaced_step_data
2597 {
2598 struct aarch64_insn_data base;
2599
2600   /* The address at which the instruction will be executed.  */
2601 CORE_ADDR new_addr;
2602 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2603 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2604 /* Number of instructions in INSN_BUF. */
2605 unsigned insn_count;
2606 /* Registers when doing displaced stepping. */
2607 struct regcache *regs;
2608
2609 aarch64_displaced_step_closure *dsc;
2610 };
2611
2612 /* Implementation of aarch64_insn_visitor method "b". */
2613
2614 static void
2615 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2616 struct aarch64_insn_data *data)
2617 {
2618 struct aarch64_displaced_step_data *dsd
2619 = (struct aarch64_displaced_step_data *) data;
2620 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2621
2622 if (can_encode_int32 (new_offset, 28))
2623 {
2624 /* Emit B rather than BL, because executing BL on a new address
2625 will get the wrong address into LR. In order to avoid this,
2626 we emit B, and update LR if the instruction is BL. */
2627 emit_b (dsd->insn_buf, 0, new_offset);
2628 dsd->insn_count++;
2629 }
2630 else
2631 {
2632 /* Write NOP. */
2633 emit_nop (dsd->insn_buf);
2634 dsd->insn_count++;
2635 dsd->dsc->pc_adjust = offset;
2636 }
2637
2638 if (is_bl)
2639 {
2640 /* Update LR. */
2641 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2642 data->insn_addr + 4);
2643 }
2644 }
2645
2646 /* Implementation of aarch64_insn_visitor method "b_cond". */
2647
2648 static void
2649 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2650 struct aarch64_insn_data *data)
2651 {
2652 struct aarch64_displaced_step_data *dsd
2653 = (struct aarch64_displaced_step_data *) data;
2654
2655   /* GDB has to fix up the PC after displaced stepping this instruction
2656      differently depending on whether the condition is true or false.
2657      Instead of checking COND against the condition flags, we can use
2658      the following instructions, and GDB can tell how to fix up the PC
2659      from the resulting PC value.
2660
2661 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2662 INSN1 ;
2663 TAKEN:
2664 INSN2
2665 */
2666
2667 emit_bcond (dsd->insn_buf, cond, 8);
2668 dsd->dsc->cond = 1;
2669 dsd->dsc->pc_adjust = offset;
2670 dsd->insn_count = 1;
2671 }
2672
2673 /* Dynamically allocate a new register. If we know the register
2674 statically, we should make it a global as above instead of using this
2675 helper function. */
2676
2677 static struct aarch64_register
2678 aarch64_register (unsigned num, int is64)
2679 {
2680 return (struct aarch64_register) { num, is64 };
2681 }
2682
2683 /* Implementation of aarch64_insn_visitor method "cb". */
2684
2685 static void
2686 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2687 const unsigned rn, int is64,
2688 struct aarch64_insn_data *data)
2689 {
2690 struct aarch64_displaced_step_data *dsd
2691 = (struct aarch64_displaced_step_data *) data;
2692
2693 /* The offset is out of range for a compare and branch
2694 instruction. We can use the following instructions instead:
2695
2696 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2697 INSN1 ;
2698 TAKEN:
2699 INSN2
2700 */
2701 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2702 dsd->insn_count = 1;
2703 dsd->dsc->cond = 1;
2704 dsd->dsc->pc_adjust = offset;
2705 }
2706
2707 /* Implementation of aarch64_insn_visitor method "tb". */
2708
2709 static void
2710 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2711 const unsigned rt, unsigned bit,
2712 struct aarch64_insn_data *data)
2713 {
2714 struct aarch64_displaced_step_data *dsd
2715 = (struct aarch64_displaced_step_data *) data;
2716
2717   /* The offset is out of range for a test bit and branch
2718      instruction.  We can use the following instructions instead:
2719
2720 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2721 INSN1 ;
2722 TAKEN:
2723 INSN2
2724
2725 */
2726 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2727 dsd->insn_count = 1;
2728 dsd->dsc->cond = 1;
2729 dsd->dsc->pc_adjust = offset;
2730 }
2731
2732 /* Implementation of aarch64_insn_visitor method "adr". */
2733
2734 static void
2735 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2736 const int is_adrp, struct aarch64_insn_data *data)
2737 {
2738 struct aarch64_displaced_step_data *dsd
2739 = (struct aarch64_displaced_step_data *) data;
2740 /* We know exactly the address the ADR{P,} instruction will compute.
2741 We can just write it to the destination register. */
2742 CORE_ADDR address = data->insn_addr + offset;
2743
2744 if (is_adrp)
2745 {
2746 /* Clear the lower 12 bits of the offset to get the 4K page. */
2747 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2748 address & ~0xfff);
2749 }
2750 else
2751 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2752 address);
2753
2754 dsd->dsc->pc_adjust = 4;
2755 emit_nop (dsd->insn_buf);
2756 dsd->insn_count = 1;
2757 }
2758
2759 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2760
2761 static void
2762 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2763 const unsigned rt, const int is64,
2764 struct aarch64_insn_data *data)
2765 {
2766 struct aarch64_displaced_step_data *dsd
2767 = (struct aarch64_displaced_step_data *) data;
2768 CORE_ADDR address = data->insn_addr + offset;
2769 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2770
2771 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2772 address);
2773
2774 if (is_sw)
2775 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2776 aarch64_register (rt, 1), zero);
2777 else
2778 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2779 aarch64_register (rt, 1), zero);
2780
2781 dsd->dsc->pc_adjust = 4;
2782 }
2783
2784 /* Implementation of aarch64_insn_visitor method "others". */
2785
2786 static void
2787 aarch64_displaced_step_others (const uint32_t insn,
2788 struct aarch64_insn_data *data)
2789 {
2790 struct aarch64_displaced_step_data *dsd
2791 = (struct aarch64_displaced_step_data *) data;
2792
2793 aarch64_emit_insn (dsd->insn_buf, insn);
2794 dsd->insn_count = 1;
2795
2796 if ((insn & 0xfffffc1f) == 0xd65f0000)
2797 {
2798 /* RET */
2799 dsd->dsc->pc_adjust = 0;
2800 }
2801 else
2802 dsd->dsc->pc_adjust = 4;
2803 }
2804
2805 static const struct aarch64_insn_visitor visitor =
2806 {
2807 aarch64_displaced_step_b,
2808 aarch64_displaced_step_b_cond,
2809 aarch64_displaced_step_cb,
2810 aarch64_displaced_step_tb,
2811 aarch64_displaced_step_adr,
2812 aarch64_displaced_step_ldr_literal,
2813 aarch64_displaced_step_others,
2814 };
2815
2816 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2817
2818 struct displaced_step_closure *
2819 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2820 CORE_ADDR from, CORE_ADDR to,
2821 struct regcache *regs)
2822 {
2823 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2824 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2825 struct aarch64_displaced_step_data dsd;
2826 aarch64_inst inst;
2827
2828 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2829 return NULL;
2830
2831 /* Look for a Load Exclusive instruction which begins the sequence. */
2832 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2833 {
2834 /* We can't displaced step atomic sequences. */
2835 return NULL;
2836 }
2837
2838 std::unique_ptr<aarch64_displaced_step_closure> dsc
2839 (new aarch64_displaced_step_closure);
2840 dsd.base.insn_addr = from;
2841 dsd.new_addr = to;
2842 dsd.regs = regs;
2843 dsd.dsc = dsc.get ();
2844 dsd.insn_count = 0;
2845 aarch64_relocate_instruction (insn, &visitor,
2846 (struct aarch64_insn_data *) &dsd);
2847 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2848
2849 if (dsd.insn_count != 0)
2850 {
2851 int i;
2852
2853 /* Instruction can be relocated to scratch pad. Copy
2854 relocated instruction(s) there. */
2855 for (i = 0; i < dsd.insn_count; i++)
2856 {
2857 if (debug_displaced)
2858 {
2859 debug_printf ("displaced: writing insn ");
2860 debug_printf ("%.8x", dsd.insn_buf[i]);
2861 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2862 }
2863 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2864 (ULONGEST) dsd.insn_buf[i]);
2865 }
2866 }
2867 else
2868 {
2869 dsc = NULL;
2870 }
2871
2872 return dsc.release ();
2873 }
2874
2875 /* Implement the "displaced_step_fixup" gdbarch method. */
2876
2877 void
2878 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2879 struct displaced_step_closure *dsc_,
2880 CORE_ADDR from, CORE_ADDR to,
2881 struct regcache *regs)
2882 {
2883 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2884
2885 if (dsc->cond)
2886 {
2887 ULONGEST pc;
2888
2889 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
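      /* The scratch pad written by aarch64_displaced_step_b_cond and
	 friends places the branch at TO, the fall-through slot at TO + 4
	 and the taken target at TO + 8, so the distance from TO tells us
	 which way the condition went.  */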
2890 if (pc - to == 8)
2891 {
2892 /* Condition is true. */
2893 }
2894 else if (pc - to == 4)
2895 {
2896 /* Condition is false. */
2897 dsc->pc_adjust = 4;
2898 }
2899 else
2900 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2901 }
2902
2903 if (dsc->pc_adjust != 0)
2904 {
2905 if (debug_displaced)
2906 {
2907 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2908 paddress (gdbarch, from), dsc->pc_adjust);
2909 }
2910 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2911 from + dsc->pc_adjust);
2912 }
2913 }
2914
2915 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2916
2917 int
2918 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2919 struct displaced_step_closure *closure)
2920 {
2921 return 1;
2922 }
2923
2924 /* Get the correct target description for the given VQ value.
2925 If VQ is zero then it is assumed SVE is not supported.
2926 (It is not possible to set VQ to zero on an SVE system). */
2927
2928 const target_desc *
2929 aarch64_read_description (uint64_t vq)
2930 {
2931 if (vq > AARCH64_MAX_SVE_VQ)
2932 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2933 AARCH64_MAX_SVE_VQ);
2934
2935 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2936
2937 if (tdesc == NULL)
2938 {
2939 tdesc = aarch64_create_target_description (vq);
2940 tdesc_aarch64_list[vq] = tdesc;
2941 }
2942
2943 return tdesc;
2944 }
2945
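
/* For example (a sketch, not tied to any particular caller):

     const target_desc *tdesc = aarch64_read_description (2);

   returns a description whose SVE vector registers are 2 * 128 = 256
   bits wide, caching it in tdesc_aarch64_list[2] so that later calls
   with the same VQ reuse the same descriptor.  */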
2946 /* Return the VQ used when creating the target description TDESC. */
2947
2948 static uint64_t
2949 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2950 {
2951 const struct tdesc_feature *feature_sve;
2952
2953 if (!tdesc_has_registers (tdesc))
2954 return 0;
2955
2956 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2957
2958 if (feature_sve == nullptr)
2959 return 0;
2960
2961 uint64_t vl = tdesc_register_bitsize (feature_sve,
2962 aarch64_sve_register_names[0]) / 8;
2963 return sve_vq_from_vl (vl);
2964 }
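
/* Worked example of the conversion above: a description whose Z
   registers are 2048 bits wide gives a vector length VL of 2048 / 8
   = 256 bytes, and sve_vq_from_vl then yields VQ = 256 / 16 = 16,
   which is AARCH64_MAX_SVE_VQ.  */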
2965
2966
2967 /* Initialize the current architecture based on INFO. If possible,
2968 re-use an architecture from ARCHES, which is a list of
2969 architectures already created during this debugging session.
2970
2971 Called e.g. at program startup, when reading a core file, and when
2972 reading a binary file. */
2973
2974 static struct gdbarch *
2975 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2976 {
2977 struct gdbarch_tdep *tdep;
2978 struct gdbarch *gdbarch;
2979 struct gdbarch_list *best_arch;
2980 struct tdesc_arch_data *tdesc_data = NULL;
2981 const struct target_desc *tdesc = info.target_desc;
2982 int i;
2983 int valid_p = 1;
2984 const struct tdesc_feature *feature_core;
2985 const struct tdesc_feature *feature_fpu;
2986 const struct tdesc_feature *feature_sve;
2987 int num_regs = 0;
2988 int num_pseudo_regs = 0;
2989
2990 /* Ensure we always have a target description. */
2991 if (!tdesc_has_registers (tdesc))
2992 tdesc = aarch64_read_description (0);
2993 gdb_assert (tdesc);
2994
2995 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2996 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2997 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2998
2999 if (feature_core == NULL)
3000 return NULL;
3001
3002 tdesc_data = tdesc_data_alloc ();
3003
3004 /* Validate the description provides the mandatory core R registers
3005 and allocate their numbers. */
3006 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3007 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3008 AARCH64_X0_REGNUM + i,
3009 aarch64_r_register_names[i]);
3010
3011 num_regs = AARCH64_X0_REGNUM + i;
3012
3013 /* Add the V registers. */
3014 if (feature_fpu != NULL)
3015 {
3016 if (feature_sve != NULL)
3017 error (_("Program contains both fpu and SVE features."));
3018
3019 /* Validate the description provides the mandatory V registers
3020 and allocate their numbers. */
3021 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3022 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3023 AARCH64_V0_REGNUM + i,
3024 aarch64_v_register_names[i]);
3025
3026 num_regs = AARCH64_V0_REGNUM + i;
3027 }
3028
3029 /* Add the SVE registers. */
3030 if (feature_sve != NULL)
3031 {
3032 /* Validate the description provides the mandatory SVE registers
3033 and allocate their numbers. */
3034 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3035 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3036 AARCH64_SVE_Z0_REGNUM + i,
3037 aarch64_sve_register_names[i]);
3038
3039 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3040       num_pseudo_regs += 32;	/* Add the Vn register pseudos.  */
3041 }
3042
3043 if (feature_fpu != NULL || feature_sve != NULL)
3044 {
3045       num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos.  */
3046       num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos.  */
3047       num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos.  */
3048       num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos.  */
3049       num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos.  */
3050 }
3051
3052 if (!valid_p)
3053 {
3054 tdesc_data_cleanup (tdesc_data);
3055 return NULL;
3056 }
3057
3058 /* AArch64 code is always little-endian. */
3059 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3060
3061 /* If there is already a candidate, use it. */
3062 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3063 best_arch != NULL;
3064 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3065 {
3066 /* Found a match. */
3067 break;
3068 }
3069
3070 if (best_arch != NULL)
3071 {
3072 if (tdesc_data != NULL)
3073 tdesc_data_cleanup (tdesc_data);
3074 return best_arch->gdbarch;
3075 }
3076
3077 tdep = XCNEW (struct gdbarch_tdep);
3078 gdbarch = gdbarch_alloc (&info, tdep);
3079
3080 /* This should be low enough for everything. */
3081 tdep->lowest_pc = 0x20;
3082 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3083 tdep->jb_elt_size = 8;
3084 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3085
3086 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3087 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3088
3089 /* Frame handling. */
3090 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3091 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3092 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3093
3094 /* Advance PC across function entry code. */
3095 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3096
3097 /* The stack grows downward. */
3098 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3099
3100 /* Breakpoint manipulation. */
3101 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3102 aarch64_breakpoint::kind_from_pc);
3103 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3104 aarch64_breakpoint::bp_from_kind);
3105 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3106 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3107
3108 /* Information about registers, etc. */
3109 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3110 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3111 set_gdbarch_num_regs (gdbarch, num_regs);
3112
3113 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3114 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3115 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3116 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3117 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3118 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3119 aarch64_pseudo_register_reggroup_p);
3120
3121 /* ABI */
3122 set_gdbarch_short_bit (gdbarch, 16);
3123 set_gdbarch_int_bit (gdbarch, 32);
3124 set_gdbarch_float_bit (gdbarch, 32);
3125 set_gdbarch_double_bit (gdbarch, 64);
3126 set_gdbarch_long_double_bit (gdbarch, 128);
3127 set_gdbarch_long_bit (gdbarch, 64);
3128 set_gdbarch_long_long_bit (gdbarch, 64);
3129 set_gdbarch_ptr_bit (gdbarch, 64);
3130 set_gdbarch_char_signed (gdbarch, 0);
3131 set_gdbarch_wchar_signed (gdbarch, 0);
3132 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3133 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3134 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3135
3136 /* Internal <-> external register number maps. */
3137 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3138
3139 /* Returning results. */
3140 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3141
3142 /* Disassembly. */
3143 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3144
3145 /* Virtual tables. */
3146 set_gdbarch_vbit_in_delta (gdbarch, 1);
3147
3148 /* Hook in the ABI-specific overrides, if they have been registered. */
3149 info.target_desc = tdesc;
3150 info.tdesc_data = tdesc_data;
3151 gdbarch_init_osabi (info, gdbarch);
3152
3153 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3154
3155 /* Add some default predicates. */
3156 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3157 dwarf2_append_unwinders (gdbarch);
3158 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3159
3160 frame_base_set_default (gdbarch, &aarch64_normal_base);
3161
3162   /* Now that we have tuned the configuration, set a few final things
3163      based on what the OS ABI has told us.  */
3164
3165 if (tdep->jb_pc >= 0)
3166 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3167
3168 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3169
3170 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3171
3172 /* Add standard register aliases. */
3173 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3174 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3175 value_of_aarch64_user_reg,
3176 &aarch64_register_aliases[i].regnum);
3177
3178 register_aarch64_ravenscar_ops (gdbarch);
3179
3180 return gdbarch;
3181 }
3182
3183 static void
3184 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3185 {
3186 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3187
3188 if (tdep == NULL)
3189 return;
3190
3191 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3192 paddress (gdbarch, tdep->lowest_pc));
3193 }
3194
3195 #if GDB_SELF_TEST
3196 namespace selftests
3197 {
3198 static void aarch64_process_record_test (void);
3199 }
3200 #endif
3201
3202 void
3203 _initialize_aarch64_tdep (void)
3204 {
3205 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3206 aarch64_dump_tdep);
3207
3208 /* Debug this file's internals. */
3209 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3210 Set AArch64 debugging."), _("\
3211 Show AArch64 debugging."), _("\
3212 When on, AArch64 specific debugging is enabled."),
3213 NULL,
3214 show_aarch64_debug,
3215 &setdebuglist, &showdebuglist);
3216
3217 #if GDB_SELF_TEST
3218 selftests::register_test ("aarch64-analyze-prologue",
3219 selftests::aarch64_analyze_prologue_test);
3220 selftests::register_test ("aarch64-process-record",
3221 selftests::aarch64_process_record_test);
3222 selftests::record_xml_tdesc ("aarch64.xml",
3223 aarch64_create_target_description (0));
3224 #endif
3225 }
3226
3227 /* AArch64 process record-replay related structures, defines, etc.  */
3228
3229 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3230 do \
3231 { \
3232 unsigned int reg_len = LENGTH; \
3233 if (reg_len) \
3234 { \
3235 REGS = XNEWVEC (uint32_t, reg_len); \
3236         memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3237 } \
3238 } \
3239 while (0)
3240
3241 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3242 do \
3243 { \
3244 unsigned int mem_len = LENGTH; \
3245 if (mem_len) \
3246 { \
3247 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3248         memcpy (&MEMS->len, &RECORD_BUF[0], \
3249                 sizeof (struct aarch64_mem_r) * LENGTH); \
3250 } \
3251 } \
3252 while (0)
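
/* The record handlers below use these macros in a common pattern: fill a
   local record_buf (register numbers) and/or record_buf_mem (length and
   address pairs), set reg_rec_count and mem_rec_count, then call
   REG_ALLOC and MEM_ALLOC to copy the data into the heap buffers hanging
   off the insn_decode_record.  */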
3253
3254 /* AArch64 record/replay structures and enumerations. */
3255
3256 struct aarch64_mem_r
3257 {
3258 uint64_t len; /* Record length. */
3259 uint64_t addr; /* Memory address. */
3260 };
3261
3262 enum aarch64_record_result
3263 {
3264 AARCH64_RECORD_SUCCESS,
3265 AARCH64_RECORD_UNSUPPORTED,
3266 AARCH64_RECORD_UNKNOWN
3267 };
3268
3269 typedef struct insn_decode_record_t
3270 {
3271 struct gdbarch *gdbarch;
3272 struct regcache *regcache;
3273 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3274 uint32_t aarch64_insn; /* Insn to be recorded. */
3275 uint32_t mem_rec_count; /* Count of memory records. */
3276 uint32_t reg_rec_count; /* Count of register records. */
3277 uint32_t *aarch64_regs; /* Registers to be recorded. */
3278 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3279 } insn_decode_record;
3280
3281 /* Record handler for data processing - register instructions. */
3282
3283 static unsigned int
3284 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3285 {
3286 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3287 uint32_t record_buf[4];
3288
3289 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3290 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3291 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3292
3293 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3294 {
3295 uint8_t setflags;
3296
3297 /* Logical (shifted register). */
3298 if (insn_bits24_27 == 0x0a)
3299 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3300 /* Add/subtract. */
3301 else if (insn_bits24_27 == 0x0b)
3302 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3303 else
3304 return AARCH64_RECORD_UNKNOWN;
3305
3306 record_buf[0] = reg_rd;
3307 aarch64_insn_r->reg_rec_count = 1;
3308 if (setflags)
3309 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3310 }
3311 else
3312 {
3313 if (insn_bits24_27 == 0x0b)
3314 {
3315 /* Data-processing (3 source). */
3316 record_buf[0] = reg_rd;
3317 aarch64_insn_r->reg_rec_count = 1;
3318 }
3319 else if (insn_bits24_27 == 0x0a)
3320 {
3321 if (insn_bits21_23 == 0x00)
3322 {
3323 /* Add/subtract (with carry). */
3324 record_buf[0] = reg_rd;
3325 aarch64_insn_r->reg_rec_count = 1;
3326 if (bit (aarch64_insn_r->aarch64_insn, 29))
3327 {
3328 record_buf[1] = AARCH64_CPSR_REGNUM;
3329 aarch64_insn_r->reg_rec_count = 2;
3330 }
3331 }
3332 else if (insn_bits21_23 == 0x02)
3333 {
3334 /* Conditional compare (register) and conditional compare
3335 (immediate) instructions. */
3336 record_buf[0] = AARCH64_CPSR_REGNUM;
3337 aarch64_insn_r->reg_rec_count = 1;
3338 }
3339 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3340 {
3341 	      /* Conditional select.  */
3342 /* Data-processing (2 source). */
3343 /* Data-processing (1 source). */
3344 record_buf[0] = reg_rd;
3345 aarch64_insn_r->reg_rec_count = 1;
3346 }
3347 else
3348 return AARCH64_RECORD_UNKNOWN;
3349 }
3350 }
3351
3352 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3353 record_buf);
3354 return AARCH64_RECORD_SUCCESS;
3355 }
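
/* For instance (encodings are illustrative): ADD X0, X1, X2 has bit 29
   clear and records only X0, while the flag-setting ADDS X0, X1, X2 has
   bit 29 set and additionally records AARCH64_CPSR_REGNUM, since it
   clobbers NZCV.  */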
3356
3357 /* Record handler for data processing - immediate instructions. */
3358
3359 static unsigned int
3360 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3361 {
3362 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3363 uint32_t record_buf[4];
3364
3365 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3366 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3367 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3368
3369 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3370 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3371 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3372 {
3373 record_buf[0] = reg_rd;
3374 aarch64_insn_r->reg_rec_count = 1;
3375 }
3376 else if (insn_bits24_27 == 0x01)
3377 {
3378 /* Add/Subtract (immediate). */
3379 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3380 record_buf[0] = reg_rd;
3381 aarch64_insn_r->reg_rec_count = 1;
3382 if (setflags)
3383 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3384 }
3385 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3386 {
3387 /* Logical (immediate). */
3388 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3389 record_buf[0] = reg_rd;
3390 aarch64_insn_r->reg_rec_count = 1;
3391 if (setflags)
3392 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3393 }
3394 else
3395 return AARCH64_RECORD_UNKNOWN;
3396
3397 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3398 record_buf);
3399 return AARCH64_RECORD_SUCCESS;
3400 }
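
/* Illustrative cases for the decoding above: MOVZ X5, #1 is a move wide
   (immediate) and records only X5, whereas SUBS X5, X6, #1 is a
   flag-setting add/subtract (immediate) and records X5 plus
   AARCH64_CPSR_REGNUM.  */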
3401
3402 /* Record handler for branch, exception generation and system instructions. */
3403
3404 static unsigned int
3405 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3406 {
3407 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3408 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3409 uint32_t record_buf[4];
3410
3411 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3412 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3413 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3414
3415 if (insn_bits28_31 == 0x0d)
3416 {
3417 /* Exception generation instructions. */
3418 if (insn_bits24_27 == 0x04)
3419 {
3420 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3421 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3422 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3423 {
3424 ULONGEST svc_number;
3425
3426 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3427 &svc_number);
3428 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3429 svc_number);
3430 }
3431 else
3432 return AARCH64_RECORD_UNSUPPORTED;
3433 }
3434 /* System instructions. */
3435 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3436 {
3437 uint32_t reg_rt, reg_crn;
3438
3439 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3440 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3441
3442 	  /* Record Rt for SYSL and MRS instructions.  */
3443 if (bit (aarch64_insn_r->aarch64_insn, 21))
3444 {
3445 record_buf[0] = reg_rt;
3446 aarch64_insn_r->reg_rec_count = 1;
3447 }
3448 	  /* Record the CPSR for HINT and MSR (immediate) instructions.  */
3449 else if (reg_crn == 0x02 || reg_crn == 0x04)
3450 {
3451 record_buf[0] = AARCH64_CPSR_REGNUM;
3452 aarch64_insn_r->reg_rec_count = 1;
3453 }
3454 }
3455 /* Unconditional branch (register). */
3456       else if ((insn_bits24_27 & 0x0e) == 0x06)
3457 {
3458 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3459 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3460 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3461 }
3462 else
3463 return AARCH64_RECORD_UNKNOWN;
3464 }
3465 /* Unconditional branch (immediate). */
3466 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3467 {
3468 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3469 if (bit (aarch64_insn_r->aarch64_insn, 31))
3470 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3471 }
3472 else
3473 /* Compare & branch (immediate), Test & branch (immediate) and
3474 Conditional branch (immediate). */
3475 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3476
3477 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3478 record_buf);
3479 return AARCH64_RECORD_SUCCESS;
3480 }
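
/* Examples of the classification above (illustrative): BL <label>
   records the PC and LR; BR X0 records only the PC; and a conditional
   branch such as B.EQ <label> falls through to the final case and also
   records only the PC.  */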
3481
3482 /* Record handler for advanced SIMD load and store instructions. */
3483
3484 static unsigned int
3485 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3486 {
3487 CORE_ADDR address;
3488 uint64_t addr_offset = 0;
3489 uint32_t record_buf[24];
3490 uint64_t record_buf_mem[24];
3491 uint32_t reg_rn, reg_rt;
3492 uint32_t reg_index = 0, mem_index = 0;
3493 uint8_t opcode_bits, size_bits;
3494
3495 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3496 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3497 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3498 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3499 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3500
3501 if (record_debug)
3502 debug_printf ("Process record: Advanced SIMD load/store\n");
3503
3504 /* Load/store single structure. */
3505 if (bit (aarch64_insn_r->aarch64_insn, 24))
3506 {
3507 uint8_t sindex, scale, selem, esize, replicate = 0;
3508 scale = opcode_bits >> 2;
3509       selem = ((opcode_bits & 0x02)
3510 	       | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3511 switch (scale)
3512 {
3513 case 1:
3514 if (size_bits & 0x01)
3515 return AARCH64_RECORD_UNKNOWN;
3516 break;
3517 case 2:
3518 if ((size_bits >> 1) & 0x01)
3519 return AARCH64_RECORD_UNKNOWN;
3520 if (size_bits & 0x01)
3521 {
3522 if (!((opcode_bits >> 1) & 0x01))
3523 scale = 3;
3524 else
3525 return AARCH64_RECORD_UNKNOWN;
3526 }
3527 break;
3528 case 3:
3529 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3530 {
3531 scale = size_bits;
3532 replicate = 1;
3533 break;
3534 }
3535 else
3536 return AARCH64_RECORD_UNKNOWN;
3537 default:
3538 break;
3539 }
3540 esize = 8 << scale;
3541 if (replicate)
3542 for (sindex = 0; sindex < selem; sindex++)
3543 {
3544 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3545 reg_rt = (reg_rt + 1) % 32;
3546 }
3547 else
3548 {
3549 for (sindex = 0; sindex < selem; sindex++)
3550 {
3551 if (bit (aarch64_insn_r->aarch64_insn, 22))
3552 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3553 else
3554 {
3555 record_buf_mem[mem_index++] = esize / 8;
3556 record_buf_mem[mem_index++] = address + addr_offset;
3557 }
3558 addr_offset = addr_offset + (esize / 8);
3559 reg_rt = (reg_rt + 1) % 32;
3560 }
3561 }
3562 }
3563 /* Load/store multiple structure. */
3564 else
3565 {
3566 uint8_t selem, esize, rpt, elements;
3567 uint8_t eindex, rindex;
3568
3569 esize = 8 << size_bits;
3570 if (bit (aarch64_insn_r->aarch64_insn, 30))
3571 elements = 128 / esize;
3572 else
3573 elements = 64 / esize;
3574
3575 switch (opcode_bits)
3576 {
3577 	/* LD/ST4 (4 Registers).  */
3578 case 0:
3579 rpt = 1;
3580 selem = 4;
3581 break;
3582 	/* LD/ST1 (4 Registers).  */
3583 case 2:
3584 rpt = 4;
3585 selem = 1;
3586 break;
3587 	/* LD/ST3 (3 Registers).  */
3588 case 4:
3589 rpt = 1;
3590 selem = 3;
3591 break;
3592 	/* LD/ST1 (3 Registers).  */
3593 case 6:
3594 rpt = 3;
3595 selem = 1;
3596 break;
3597 	/* LD/ST1 (1 Register).  */
3598 case 7:
3599 rpt = 1;
3600 selem = 1;
3601 break;
3602 	/* LD/ST2 (2 Registers).  */
3603 case 8:
3604 rpt = 1;
3605 selem = 2;
3606 break;
3607 	/* LD/ST1 (2 Registers).  */
3608 case 10:
3609 rpt = 2;
3610 selem = 1;
3611 break;
3612 default:
3613 return AARCH64_RECORD_UNSUPPORTED;
3614 break;
3615 }
3616 for (rindex = 0; rindex < rpt; rindex++)
3617 for (eindex = 0; eindex < elements; eindex++)
3618 {
3619 uint8_t reg_tt, sindex;
3620 reg_tt = (reg_rt + rindex) % 32;
3621 for (sindex = 0; sindex < selem; sindex++)
3622 {
3623 if (bit (aarch64_insn_r->aarch64_insn, 22))
3624 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3625 else
3626 {
3627 record_buf_mem[mem_index++] = esize / 8;
3628 record_buf_mem[mem_index++] = address + addr_offset;
3629 }
3630 addr_offset = addr_offset + (esize / 8);
3631 reg_tt = (reg_tt + 1) % 32;
3632 }
3633 }
3634 }
3635
3636 if (bit (aarch64_insn_r->aarch64_insn, 23))
3637 record_buf[reg_index++] = reg_rn;
3638
3639 aarch64_insn_r->reg_rec_count = reg_index;
3640 aarch64_insn_r->mem_rec_count = mem_index / 2;
3641 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3642 record_buf_mem);
3643 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3644 record_buf);
3645 return AARCH64_RECORD_SUCCESS;
3646 }
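
/* A concrete walk through the multiple-structure path above: for
   ST1 {V0.16B}, [X0] we get opcode_bits 7 (one register, selem 1),
   size_bits 0 (esize 8) and the Q bit set (16 elements), so the handler
   records sixteen one-byte memory entries starting at the address held
   in X0.  */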
3647
3648 /* Record handler for load and store instructions. */
3649
3650 static unsigned int
3651 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3652 {
3653 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3654 uint8_t insn_bit23, insn_bit21;
3655 uint8_t opc, size_bits, ld_flag, vector_flag;
3656 uint32_t reg_rn, reg_rt, reg_rt2;
3657 uint64_t datasize, offset;
3658 uint32_t record_buf[8];
3659 uint64_t record_buf_mem[8];
3660 CORE_ADDR address;
3661
3662 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3663 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3664 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3665 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3666 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3667 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3668 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3669 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3670 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3671 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3672 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3673
3674 /* Load/store exclusive. */
3675 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3676 {
3677 if (record_debug)
3678 debug_printf ("Process record: load/store exclusive\n");
3679
3680 if (ld_flag)
3681 {
3682 record_buf[0] = reg_rt;
3683 aarch64_insn_r->reg_rec_count = 1;
3684 if (insn_bit21)
3685 {
3686 record_buf[1] = reg_rt2;
3687 aarch64_insn_r->reg_rec_count = 2;
3688 }
3689 }
3690 else
3691 {
3692 if (insn_bit21)
3693 datasize = (8 << size_bits) * 2;
3694 else
3695 datasize = (8 << size_bits);
3696 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3697 &address);
3698 record_buf_mem[0] = datasize / 8;
3699 record_buf_mem[1] = address;
3700 aarch64_insn_r->mem_rec_count = 1;
3701 if (!insn_bit23)
3702 {
3703 /* Save register rs. */
3704 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3705 aarch64_insn_r->reg_rec_count = 1;
3706 }
3707 }
3708 }
3709 /* Load register (literal) instructions.  */
3710 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3711 {
3712 if (record_debug)
3713 debug_printf ("Process record: load register (literal)\n");
3714 if (vector_flag)
3715 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3716 else
3717 record_buf[0] = reg_rt;
3718 aarch64_insn_r->reg_rec_count = 1;
3719 }
3720 /* All types of load/store pair instructions.  */
3721 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3722 {
3723 if (record_debug)
3724 debug_printf ("Process record: load/store pair\n");
3725
3726 if (ld_flag)
3727 {
3728 if (vector_flag)
3729 {
3730 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3731 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3732 }
3733 else
3734 {
3735 record_buf[0] = reg_rt;
3736 record_buf[1] = reg_rt2;
3737 }
3738 aarch64_insn_r->reg_rec_count = 2;
3739 }
3740 else
3741 {
3742 uint16_t imm7_off;
3743 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3744 if (!vector_flag)
3745 size_bits = size_bits >> 1;
3746 datasize = 8 << (2 + size_bits);
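	  /* imm7 is a signed 7-bit offset scaled by the access size; when
	     its sign bit (0x40) is set, the two's complement below yields
	     the magnitude, which is then subtracted from the base.  */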
3747 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3748 offset = offset << (2 + size_bits);
3749 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3750 &address);
3751 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3752 {
3753 if (imm7_off & 0x40)
3754 address = address - offset;
3755 else
3756 address = address + offset;
3757 }
3758
3759 record_buf_mem[0] = datasize / 8;
3760 record_buf_mem[1] = address;
3761 record_buf_mem[2] = datasize / 8;
3762 record_buf_mem[3] = address + (datasize / 8);
3763 aarch64_insn_r->mem_rec_count = 2;
3764 }
3765 if (bit (aarch64_insn_r->aarch64_insn, 23))
3766 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3767 }
3768 /* Load/store register (unsigned immediate) instructions. */
3769 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3770 {
3771 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3772 if (!(opc >> 1))
3773 {
3774 if (opc & 0x01)
3775 ld_flag = 0x01;
3776 else
3777 ld_flag = 0x0;
3778 }
3779 else
3780 {
3781 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3782 {
3783 /* PRFM (immediate) */
3784 return AARCH64_RECORD_SUCCESS;
3785 }
3786 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3787 {
3788 /* LDRSW (immediate) */
3789 ld_flag = 0x1;
3790 }
3791 else
3792 {
3793 if (opc & 0x01)
3794 ld_flag = 0x01;
3795 else
3796 ld_flag = 0x0;
3797 }
3798 }
3799
3800 if (record_debug)
3801 {
3802 debug_printf ("Process record: load/store (unsigned immediate):"
3803 " size %x V %d opc %x\n", size_bits, vector_flag,
3804 opc);
3805 }
3806
3807 if (!ld_flag)
3808 {
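	  /* The 12-bit immediate is unsigned and scaled by the access
	     size, so the effective address below is base + (imm12 << size).  */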
3809 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3810 datasize = 8 << size_bits;
3811 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3812 &address);
3813 offset = offset << size_bits;
3814 address = address + offset;
3815
3816 record_buf_mem[0] = datasize >> 3;
3817 record_buf_mem[1] = address;
3818 aarch64_insn_r->mem_rec_count = 1;
3819 }
3820 else
3821 {
3822 if (vector_flag)
3823 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3824 else
3825 record_buf[0] = reg_rt;
3826 aarch64_insn_r->reg_rec_count = 1;
3827 }
3828 }
3829 /* Load/store register (register offset) instructions. */
3830 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3831 && insn_bits10_11 == 0x02 && insn_bit21)
3832 {
3833 if (record_debug)
3834 debug_printf ("Process record: load/store (register offset)\n");
3835 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3836 if (!(opc >> 1))
3837 if (opc & 0x01)
3838 ld_flag = 0x01;
3839 else
3840 ld_flag = 0x0;
3841 else
3842 if (size_bits != 0x03)
3843 ld_flag = 0x01;
3844 else
3845 return AARCH64_RECORD_UNKNOWN;
3846
3847 if (!ld_flag)
3848 {
3849 ULONGEST reg_rm_val;
3850
3851 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3852 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
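	  /* Bit 12 is the S bit: when set, the register offset is scaled
	     left by the access size.  */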
3853 if (bit (aarch64_insn_r->aarch64_insn, 12))
3854 offset = reg_rm_val << size_bits;
3855 else
3856 offset = reg_rm_val;
3857 datasize = 8 << size_bits;
3858 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3859 &address);
3860 address = address + offset;
3861 record_buf_mem[0] = datasize >> 3;
3862 record_buf_mem[1] = address;
3863 aarch64_insn_r->mem_rec_count = 1;
3864 }
3865 else
3866 {
3867 if (vector_flag)
3868 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3869 else
3870 record_buf[0] = reg_rt;
3871 aarch64_insn_r->reg_rec_count = 1;
3872 }
3873 }
3874 /* Load/store register (immediate and unprivileged) instructions. */
3875 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3876 && !insn_bit21)
3877 {
3878 if (record_debug)
3879 {
3880 debug_printf ("Process record: load/store "
3881 "(immediate and unprivileged)\n");
3882 }
3883 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3884 if (!(opc >> 1))
3885 if (opc & 0x01)
3886 ld_flag = 0x01;
3887 else
3888 ld_flag = 0x0;
3889 else
3890 if (size_bits != 0x03)
3891 ld_flag = 0x01;
3892 else
3893 return AARCH64_RECORD_UNKNOWN;
3894
3895 if (!ld_flag)
3896 {
3897 uint16_t imm9_off;
3898 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
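	  /* imm9 is a signed 9-bit byte offset; bit 0x0100 is its sign.
	     For the pre/post-indexed forms the written-back base register
	     is recorded at the end of this block.  */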
3899 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3900 datasize = 8 << size_bits;
3901 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3902 &address);
3903 if (insn_bits10_11 != 0x01)
3904 {
3905 if (imm9_off & 0x0100)
3906 address = address - offset;
3907 else
3908 address = address + offset;
3909 }
3910 record_buf_mem[0] = datasize >> 3;
3911 record_buf_mem[1] = address;
3912 aarch64_insn_r->mem_rec_count = 1;
3913 }
3914 else
3915 {
3916 if (vector_flag)
3917 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3918 else
3919 record_buf[0] = reg_rt;
3920 aarch64_insn_r->reg_rec_count = 1;
3921 }
3922 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3923 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3924 }
3925 /* Advanced SIMD load/store instructions. */
3926 else
3927 return aarch64_record_asimd_load_store (aarch64_insn_r);
3928
3929 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3930 record_buf_mem);
3931 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3932 record_buf);
3933 return AARCH64_RECORD_SUCCESS;
3934 }
3935
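/* A small self-contained sketch (the helper name is illustrative, not
   GDB's) of the two's-complement arithmetic the handlers above apply to
   the signed imm7/imm9 offset fields.  */

#include <stdint.h>
#include <stdio.h>

/* Recover the magnitude of a negative WIDTH-bit field the way the record
   code does: invert, mask to WIDTH bits, add one.  */
static unsigned int
field_magnitude (uint16_t field, unsigned int width)
{
  uint16_t mask = (1u << width) - 1;
  return ((~field) & mask) + 1;
}

int
main (void)
{
  /* As a 7-bit two's-complement value, 0x7d encodes -3.  */
  uint16_t imm7 = 0x7d;

  if (imm7 & 0x40)
    printf ("offset = -%u\n", field_magnitude (imm7, 7));   /* prints -3 */
  else
    printf ("offset = %u\n", imm7);
  return 0;
}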
3936 /* Record handler for data processing SIMD and floating point instructions. */
3937
3938 static unsigned int
3939 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3940 {
3941 uint8_t insn_bit21, opcode, rmode, reg_rd;
3942 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3943 uint8_t insn_bits11_14;
3944 uint32_t record_buf[2];
3945
3946 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3947 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3948 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3949 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3950 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3951 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3952 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3953 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3954 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3955
3956 if (record_debug)
3957 debug_printf ("Process record: data processing SIMD/FP: ");
3958
3959 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3960 {
3961 /* Floating point - fixed point conversion instructions. */
3962 if (!insn_bit21)
3963 {
3964 if (record_debug)
3965 debug_printf ("FP - fixed point conversion");
3966
3967 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3968 record_buf[0] = reg_rd;
3969 else
3970 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3971 }
3972 /* Floating point - conditional compare instructions. */
3973 else if (insn_bits10_11 == 0x01)
3974 {
3975 if (record_debug)
3976 debug_printf ("FP - conditional compare");
3977
3978 record_buf[0] = AARCH64_CPSR_REGNUM;
3979 }
3980 /* Floating point - data processing (2-source) and
3981 conditional select instructions. */
3982 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3983 {
3984 if (record_debug)
3985 debug_printf ("FP - DP (2-source)");
3986
3987 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3988 }
3989 else if (insn_bits10_11 == 0x00)
3990 {
3991 /* Floating point - immediate instructions. */
3992 if ((insn_bits12_15 & 0x01) == 0x01
3993 || (insn_bits12_15 & 0x07) == 0x04)
3994 {
3995 if (record_debug)
3996 debug_printf ("FP - immediate");
3997 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3998 }
3999 /* Floating point - compare instructions. */
4000 else if ((insn_bits12_15 & 0x03) == 0x02)
4001 {
4002 if (record_debug)
4003 debug_printf ("FP - compare");
4004 record_buf[0] = AARCH64_CPSR_REGNUM;
4005 }
4006 /* Floating point - integer conversion instructions.  */
4007 else if (insn_bits12_15 == 0x00)
4008 {
4009 /* Convert float to integer instruction. */
4010 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4011 {
4012 if (record_debug)
4013 debug_printf ("float to int conversion");
4014
4015 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4016 }
4017 /* Convert integer to float instruction. */
4018 else if ((opcode >> 1) == 0x01 && !rmode)
4019 {
4020 if (record_debug)
4021 debug_printf ("int to float conversion");
4022
4023 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4024 }
4025 /* Move between float and integer registers.  */
4026 else if ((opcode >> 1) == 0x03)
4027 {
4028 if (record_debug)
4029 debug_printf ("move float to int");
4030
4031 if (!(opcode & 0x01))
4032 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4033 else
4034 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4035 }
4036 else
4037 return AARCH64_RECORD_UNKNOWN;
4038 }
4039 else
4040 return AARCH64_RECORD_UNKNOWN;
4041 }
4042 else
4043 return AARCH64_RECORD_UNKNOWN;
4044 }
4045 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4046 {
4047 if (record_debug)
4048 debug_printf ("SIMD copy");
4049
4050 /* Advanced SIMD copy instructions. */
4051 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4052 && !bit (aarch64_insn_r->aarch64_insn, 15)
4053 && bit (aarch64_insn_r->aarch64_insn, 10))
4054 {
4055 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4056 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4057 else
4058 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4059 }
4060 else
4061 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4062 }
4063 /* All remaining floating point or advanced SIMD instructions. */
4064 else
4065 {
4066 if (record_debug)
4067 debug_printf ("all remaining FP/SIMD");
4068
4069 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4070 }
4071
4072 if (record_debug)
4073 debug_printf ("\n");
4074
4075 aarch64_insn_r->reg_rec_count++;
4076 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4077 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4078 record_buf);
4079 return AARCH64_RECORD_SUCCESS;
4080 }
4081
4082 /* Decodes the insn's type and invokes its record handler.  */
4083
4084 static unsigned int
4085 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4086 {
4087 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4088
4089 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4090 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4091 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4092 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4093
4094 /* Data processing - immediate instructions. */
4095 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4096 return aarch64_record_data_proc_imm (aarch64_insn_r);
4097
4098 /* Branch, exception generation and system instructions. */
4099 if (ins_bit26 && !ins_bit27 && ins_bit28)
4100 return aarch64_record_branch_except_sys (aarch64_insn_r);
4101
4102 /* Load and store instructions. */
4103 if (!ins_bit25 && ins_bit27)
4104 return aarch64_record_load_store (aarch64_insn_r);
4105
4106 /* Data processing - register instructions. */
4107 if (ins_bit25 && !ins_bit26 && ins_bit27)
4108 return aarch64_record_data_proc_reg (aarch64_insn_r);
4109
4110 /* Data processing - SIMD and floating point instructions. */
4111 if (ins_bit25 && ins_bit26 && ins_bit27)
4112 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4113
4114 return AARCH64_RECORD_UNSUPPORTED;
4115 }
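/* Worked example: in the PRFM encoding 0xf9800020 used by the self test
   below, bits 25-28 are 0, 0, 1, 1 respectively, so the !ins_bit25 &&
   ins_bit27 case above dispatches to aarch64_record_load_store.  */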
4116
4117 /* Cleans up local record registers and memory allocations. */
4118
4119 static void
4120 deallocate_reg_mem (insn_decode_record *record)
4121 {
4122 xfree (record->aarch64_regs);
4123 xfree (record->aarch64_mems);
4124 }
4125
4126 #if GDB_SELF_TEST
4127 namespace selftests {
4128
4129 static void
4130 aarch64_process_record_test (void)
4131 {
4132 struct gdbarch_info info;
4133 uint32_t ret;
4134
4135 gdbarch_info_init (&info);
4136 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4137
4138 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4139 SELF_CHECK (gdbarch != NULL);
4140
4141 insn_decode_record aarch64_record;
4142
4143 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4144 aarch64_record.regcache = NULL;
4145 aarch64_record.this_addr = 0;
4146 aarch64_record.gdbarch = gdbarch;
4147
4148 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4149 aarch64_record.aarch64_insn = 0xf9800020;
4150 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4151 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4152 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4153 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4154
4155 deallocate_reg_mem (&aarch64_record);
4156 }
4157
4158 } // namespace selftests
4159 #endif /* GDB_SELF_TEST */
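/* The self test above is registered from this file's _initialize routine
   (outside this excerpt); the registration looks roughly like:

     selftests::register_test ("aarch64-process-record",
			       selftests::aarch64_process_record_test);  */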
4160
4161 /* Parse the current instruction, and record the values of the
4162    registers and memory that will be changed by it to
4163    record_arch_list.  Return -1 if something goes wrong.  */
4164
4165 int
4166 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4167 CORE_ADDR insn_addr)
4168 {
4169 uint32_t rec_no = 0;
4170 uint8_t insn_size = 4;
4171 uint32_t ret = 0;
4172 gdb_byte buf[insn_size];
4173 insn_decode_record aarch64_record;
4174
4175 memset (&buf[0], 0, insn_size);
4176 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4177 target_read_memory (insn_addr, &buf[0], insn_size);
4178 aarch64_record.aarch64_insn
4179 = (uint32_t) extract_unsigned_integer (&buf[0],
4180 insn_size,
4181 gdbarch_byte_order (gdbarch));
4182 aarch64_record.regcache = regcache;
4183 aarch64_record.this_addr = insn_addr;
4184 aarch64_record.gdbarch = gdbarch;
4185
4186 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4187 if (ret == AARCH64_RECORD_UNSUPPORTED)
4188 {
4189 printf_unfiltered (_("Process record does not support instruction "
4190 "0x%0x at address %s.\n"),
4191 aarch64_record.aarch64_insn,
4192 paddress (gdbarch, insn_addr));
4193 ret = -1;
4194 }
4195
4196 if (0 == ret)
4197 {
4198 /* Record registers. */
4199 record_full_arch_list_add_reg (aarch64_record.regcache,
4200 AARCH64_PC_REGNUM);
4201 /* Always record register CPSR. */
4202 record_full_arch_list_add_reg (aarch64_record.regcache,
4203 AARCH64_CPSR_REGNUM);
4204 if (aarch64_record.aarch64_regs)
4205 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4206 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4207 aarch64_record.aarch64_regs[rec_no]))
4208 ret = -1;
4209
4210 /* Record memories. */
4211 if (aarch64_record.aarch64_mems)
4212 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4213 if (record_full_arch_list_add_mem
4214 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4215 aarch64_record.aarch64_mems[rec_no].len))
4216 ret = -1;
4217
4218 if (record_full_arch_list_add_end ())
4219 ret = -1;
4220 }
4221
4222 deallocate_reg_mem (&aarch64_record);
4223 return ret;
4224 }
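/* aarch64_process_record is hooked into the architecture vector by this
   file's gdbarch initialization code (outside this excerpt), roughly:

     set_gdbarch_process_record (gdbarch, aarch64_process_record);  */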