Add Aarch64 SVE target description
binutils-gdb: gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
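
/* For example, bits (insn, 5, 9) extracts the five-bit field occupying
   bit positions 5 through 9 (inclusive) of INSN, and bit (insn, 31)
   tests the most significant bit of a 32-bit instruction word.  */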

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
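
/* These bases are offsets relative to gdbarch_num_regs (gdbarch), and
   each pseudo bank (Q, D, S, H, B) holds 32 registers; for example, the
   pseudo number of d5 is gdbarch_num_regs () + AARCH64_D0_REGNUM + 5,
   which is the convention the saved-register bookkeeping below relies
   on.  */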

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
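
/* The list is indexed by the SVE vector quotient (VQ, the vector length
   in 128-bit quadwords), with entry 0 presumably covering the non-SVE
   case; hence the AARCH64_MAX_SVE_VQ + 1 slots.  */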

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}
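
/* For reference, a typical prologue accepted by the analysis above is:

     stp x29, x30, [sp, #-32]!   -- allocate frame, save fp/lr pair
     mov x29, sp                 -- establish the frame pointer
     str x19, [sp, #16]          -- save a callee-saved register

   (mov x29, sp is an alias of add x29, sp, #0 and so is handled by the
   addsub_imm case above.)  Analysis stops at the first branch or
   otherwise unrecognized instruction.  */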

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};
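
/* The canned sequence is addressed from zero, so the instruction at test
   address N is simply m_insns[N / 4]; the tests below therefore call the
   analyzer with a START address of 0.  */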

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory
   are not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable,
         to keep the prologue unwinder from trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but cap the alignment at 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
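
/* For example, under these rules struct { int32_t a; double b; } aligns
   to 8 bytes (its largest field alignment), while a 32-byte vector type
   aligns to only 16 bytes because of the 128-bit cap above.  */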

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
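
/* For example, struct { float x, y, z; } is an HFA (at most four members,
   all of the same floating-point type), whereas struct { float x;
   double y; } is not, because its member types differ.  */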

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
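
/* A 16-byte integral argument thus occupies two consecutive X registers;
   note that NGRN itself is advanced by the caller (pass_in_x_or_stack
   below), not here.  */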

/* Attempt to marshal a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x (), does not handle
   arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshal an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
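
/* For example, a 12-byte aggregate with 8-byte alignment advances NSAA
   to 12 and then pushes 4 bytes of padding, leaving NSAA at 16 so that
   the next stack slot remains properly aligned.  */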

/* Marshal an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient
   registers are available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regs->cooked_read (regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regs->cooked_read (regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regs->cooked_read (regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regs->cooked_read (regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
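
/* So, for example, a four-member HFA such as struct { double a, b, c, d; }
   is returned in V registers, while a 24-byte plain struct is returned in
   memory through the buffer whose address the caller placed in X8 (see
   aarch64_push_dummy_call above).  */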
1920
1921 /* Write into appropriate registers a function return value of type
1922 TYPE, given in virtual format. */
1923
1924 static void
1925 aarch64_store_return_value (struct type *type, struct regcache *regs,
1926 const gdb_byte *valbuf)
1927 {
1928 struct gdbarch *gdbarch = regs->arch ();
1929 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1930
1931 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1932 {
1933 bfd_byte buf[V_REGISTER_SIZE];
1934 int len = TYPE_LENGTH (type);
1935
1936 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1937 regs->cooked_write (AARCH64_V0_REGNUM, buf);
1938 }
1939 else if (TYPE_CODE (type) == TYPE_CODE_INT
1940 || TYPE_CODE (type) == TYPE_CODE_CHAR
1941 || TYPE_CODE (type) == TYPE_CODE_BOOL
1942 || TYPE_CODE (type) == TYPE_CODE_PTR
1943 || TYPE_IS_REFERENCE (type)
1944 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1945 {
1946 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1947 {
 1948 	  /* Values of one word or less are zero/sign-extended and
 1949 	     returned in X0.  */
1950 bfd_byte tmpbuf[X_REGISTER_SIZE];
1951 LONGEST val = unpack_long (type, valbuf);
1952
1953 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1954 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
1955 }
1956 else
1957 {
 1958 	  /* Integral values greater than one word are stored in
 1959 	     consecutive registers starting with X0.  This will always
 1960 	     be a multiple of the register size.  */
1961 int len = TYPE_LENGTH (type);
1962 int regno = AARCH64_X0_REGNUM;
1963
1964 while (len > 0)
1965 {
1966 regs->cooked_write (regno++, valbuf);
1967 len -= X_REGISTER_SIZE;
1968 valbuf += X_REGISTER_SIZE;
1969 }
1970 }
1971 }
1972 else if (is_hfa_or_hva (type))
1973 {
1974 int elements = TYPE_NFIELDS (type);
1975 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1976 int len = TYPE_LENGTH (member_type);
1977 int i;
1978
1979 for (i = 0; i < elements; i++)
1980 {
1981 int regno = AARCH64_V0_REGNUM + i;
1982 bfd_byte tmpbuf[V_REGISTER_SIZE];
1983
1984 if (aarch64_debug)
1985 {
1986 debug_printf ("write HFA or HVA return value element %d to %s\n",
1987 i + 1,
1988 gdbarch_register_name (gdbarch, regno));
1989 }
1990
1991 memcpy (tmpbuf, valbuf, len);
1992 regs->cooked_write (regno, tmpbuf);
1993 valbuf += len;
1994 }
1995 }
1996 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1997 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1998 {
1999 /* Short vector. */
2000 gdb_byte buf[V_REGISTER_SIZE];
2001
2002 memcpy (buf, valbuf, TYPE_LENGTH (type));
2003 regs->cooked_write (AARCH64_V0_REGNUM, buf);
2004 }
2005 else
2006 {
2007 /* For a structure or union the behaviour is as if the value had
2008 been stored to word-aligned memory and then loaded into
2009 registers with 64-bit load instruction(s). */
2010 int len = TYPE_LENGTH (type);
2011 int regno = AARCH64_X0_REGNUM;
2012 bfd_byte tmpbuf[X_REGISTER_SIZE];
2013
2014 while (len > 0)
2015 {
2016 memcpy (tmpbuf, valbuf,
2017 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2018 regs->cooked_write (regno++, tmpbuf);
2019 len -= X_REGISTER_SIZE;
2020 valbuf += X_REGISTER_SIZE;
2021 }
2022 }
2023 }
2024
2025 /* Implement the "return_value" gdbarch method. */
2026
2027 static enum return_value_convention
2028 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2029 struct type *valtype, struct regcache *regcache,
2030 gdb_byte *readbuf, const gdb_byte *writebuf)
2031 {
2032
2033 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2034 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2035 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2036 {
2037 if (aarch64_return_in_memory (gdbarch, valtype))
2038 {
2039 if (aarch64_debug)
2040 debug_printf ("return value in memory\n");
2041 return RETURN_VALUE_STRUCT_CONVENTION;
2042 }
2043 }
2044
2045 if (writebuf)
2046 aarch64_store_return_value (valtype, regcache, writebuf);
2047
2048 if (readbuf)
2049 aarch64_extract_return_value (valtype, regcache, readbuf);
2050
2051 if (aarch64_debug)
2052 debug_printf ("return value in registers\n");
2053
2054 return RETURN_VALUE_REGISTER_CONVENTION;
2055 }
2056
2057 /* Implement the "get_longjmp_target" gdbarch method. */
2058
2059 static int
2060 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2061 {
2062 CORE_ADDR jb_addr;
2063 gdb_byte buf[X_REGISTER_SIZE];
2064 struct gdbarch *gdbarch = get_frame_arch (frame);
2065 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2066 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2067
2068 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2069
2070 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2071 X_REGISTER_SIZE))
2072 return 0;
2073
2074 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2075 return 1;
2076 }
2077
2078 /* Implement the "gen_return_address" gdbarch method. */
2079
2080 static void
2081 aarch64_gen_return_address (struct gdbarch *gdbarch,
2082 struct agent_expr *ax, struct axs_value *value,
2083 CORE_ADDR scope)
2084 {
2085 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2086 value->kind = axs_lvalue_register;
2087 value->u.reg = AARCH64_LR_REGNUM;
2088 }
2089 \f
2090
 2091 /* Return the pseudo register name corresponding to register REGNUM.  */
2092
2093 static const char *
2094 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2095 {
2096 static const char *const q_name[] =
2097 {
2098 "q0", "q1", "q2", "q3",
2099 "q4", "q5", "q6", "q7",
2100 "q8", "q9", "q10", "q11",
2101 "q12", "q13", "q14", "q15",
2102 "q16", "q17", "q18", "q19",
2103 "q20", "q21", "q22", "q23",
2104 "q24", "q25", "q26", "q27",
2105 "q28", "q29", "q30", "q31",
2106 };
2107
2108 static const char *const d_name[] =
2109 {
2110 "d0", "d1", "d2", "d3",
2111 "d4", "d5", "d6", "d7",
2112 "d8", "d9", "d10", "d11",
2113 "d12", "d13", "d14", "d15",
2114 "d16", "d17", "d18", "d19",
2115 "d20", "d21", "d22", "d23",
2116 "d24", "d25", "d26", "d27",
2117 "d28", "d29", "d30", "d31",
2118 };
2119
2120 static const char *const s_name[] =
2121 {
2122 "s0", "s1", "s2", "s3",
2123 "s4", "s5", "s6", "s7",
2124 "s8", "s9", "s10", "s11",
2125 "s12", "s13", "s14", "s15",
2126 "s16", "s17", "s18", "s19",
2127 "s20", "s21", "s22", "s23",
2128 "s24", "s25", "s26", "s27",
2129 "s28", "s29", "s30", "s31",
2130 };
2131
2132 static const char *const h_name[] =
2133 {
2134 "h0", "h1", "h2", "h3",
2135 "h4", "h5", "h6", "h7",
2136 "h8", "h9", "h10", "h11",
2137 "h12", "h13", "h14", "h15",
2138 "h16", "h17", "h18", "h19",
2139 "h20", "h21", "h22", "h23",
2140 "h24", "h25", "h26", "h27",
2141 "h28", "h29", "h30", "h31",
2142 };
2143
2144 static const char *const b_name[] =
2145 {
2146 "b0", "b1", "b2", "b3",
2147 "b4", "b5", "b6", "b7",
2148 "b8", "b9", "b10", "b11",
2149 "b12", "b13", "b14", "b15",
2150 "b16", "b17", "b18", "b19",
2151 "b20", "b21", "b22", "b23",
2152 "b24", "b25", "b26", "b27",
2153 "b28", "b29", "b30", "b31",
2154 };
2155
2156 regnum -= gdbarch_num_regs (gdbarch);
2157
2158 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2159 return q_name[regnum - AARCH64_Q0_REGNUM];
2160
2161 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2162 return d_name[regnum - AARCH64_D0_REGNUM];
2163
2164 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2165 return s_name[regnum - AARCH64_S0_REGNUM];
2166
2167 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2168 return h_name[regnum - AARCH64_H0_REGNUM];
2169
2170 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2171 return b_name[regnum - AARCH64_B0_REGNUM];
2172
2173 internal_error (__FILE__, __LINE__,
2174 _("aarch64_pseudo_register_name: bad register number %d"),
2175 regnum);
2176 }
2177
2178 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2179
2180 static struct type *
2181 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2182 {
2183 regnum -= gdbarch_num_regs (gdbarch);
2184
2185 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2186 return aarch64_vnq_type (gdbarch);
2187
2188 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2189 return aarch64_vnd_type (gdbarch);
2190
2191 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2192 return aarch64_vns_type (gdbarch);
2193
2194 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2195 return aarch64_vnh_type (gdbarch);
2196
2197 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2198 return aarch64_vnb_type (gdbarch);
2199
2200 internal_error (__FILE__, __LINE__,
2201 _("aarch64_pseudo_register_type: bad register number %d"),
2202 regnum);
2203 }
2204
2205 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2206
2207 static int
2208 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2209 struct reggroup *group)
2210 {
2211 regnum -= gdbarch_num_regs (gdbarch);
2212
2213 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2214 return group == all_reggroup || group == vector_reggroup;
2215 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2216 return (group == all_reggroup || group == vector_reggroup
2217 || group == float_reggroup);
2218 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2219 return (group == all_reggroup || group == vector_reggroup
2220 || group == float_reggroup);
2221 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2222 return group == all_reggroup || group == vector_reggroup;
2223 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2224 return group == all_reggroup || group == vector_reggroup;
2225
2226 return group == all_reggroup;
2227 }
2228
2229 /* Implement the "pseudo_register_read_value" gdbarch method. */
2230
2231 static struct value *
2232 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2233 readable_regcache *regcache,
2234 int regnum)
2235 {
2236 gdb_byte reg_buf[V_REGISTER_SIZE];
2237 struct value *result_value;
2238 gdb_byte *buf;
2239
2240 result_value = allocate_value (register_type (gdbarch, regnum));
2241 VALUE_LVAL (result_value) = lval_register;
2242 VALUE_REGNUM (result_value) = regnum;
2243 buf = value_contents_raw (result_value);
2244
2245 regnum -= gdbarch_num_regs (gdbarch);
2246
2247 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2248 {
2249 enum register_status status;
2250 unsigned v_regnum;
2251
2252 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2253 status = regcache->raw_read (v_regnum, reg_buf);
2254 if (status != REG_VALID)
2255 mark_value_bytes_unavailable (result_value, 0,
2256 TYPE_LENGTH (value_type (result_value)));
2257 else
2258 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2259 return result_value;
2260 }
2261
2262 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2263 {
2264 enum register_status status;
2265 unsigned v_regnum;
2266
2267 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2268 status = regcache->raw_read (v_regnum, reg_buf);
2269 if (status != REG_VALID)
2270 mark_value_bytes_unavailable (result_value, 0,
2271 TYPE_LENGTH (value_type (result_value)));
2272 else
2273 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2274 return result_value;
2275 }
2276
2277 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2278 {
2279 enum register_status status;
2280 unsigned v_regnum;
2281
2282 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2283 status = regcache->raw_read (v_regnum, reg_buf);
2284 if (status != REG_VALID)
2285 mark_value_bytes_unavailable (result_value, 0,
2286 TYPE_LENGTH (value_type (result_value)));
2287 else
2288 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2289 return result_value;
2290 }
2291
2292 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2293 {
2294 enum register_status status;
2295 unsigned v_regnum;
2296
2297 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2298 status = regcache->raw_read (v_regnum, reg_buf);
2299 if (status != REG_VALID)
2300 mark_value_bytes_unavailable (result_value, 0,
2301 TYPE_LENGTH (value_type (result_value)));
2302 else
2303 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2304 return result_value;
2305 }
2306
2307 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2308 {
2309 enum register_status status;
2310 unsigned v_regnum;
2311
2312 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2313 status = regcache->raw_read (v_regnum, reg_buf);
2314 if (status != REG_VALID)
2315 mark_value_bytes_unavailable (result_value, 0,
2316 TYPE_LENGTH (value_type (result_value)));
2317 else
2318 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2319 return result_value;
2320 }
2321
 2322   gdb_assert_not_reached ("regnum out of bounds");
2323 }
2324
2325 /* Implement the "pseudo_register_write" gdbarch method. */
2326
2327 static void
2328 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2329 int regnum, const gdb_byte *buf)
2330 {
2331 gdb_byte reg_buf[V_REGISTER_SIZE];
2332
 2333   /* Ensure the register buffer is zero; we want GDB writes of the
 2334      various 'scalar' pseudo registers to behave like architectural
 2335      writes: register-width bytes are written and the remainder is
 2336      set to zero.  */
2337 memset (reg_buf, 0, sizeof (reg_buf));
2338
2339 regnum -= gdbarch_num_regs (gdbarch);
2340
2341 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2342 {
2343 /* pseudo Q registers */
2344 unsigned v_regnum;
2345
2346 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2347 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2348 regcache->raw_write (v_regnum, reg_buf);
2349 return;
2350 }
2351
2352 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2353 {
2354 /* pseudo D registers */
2355 unsigned v_regnum;
2356
2357 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2358 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2359 regcache->raw_write (v_regnum, reg_buf);
2360 return;
2361 }
2362
2363 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2364 {
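      /* pseudo S registers */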
2365 unsigned v_regnum;
2366
2367 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2368 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2369 regcache->raw_write (v_regnum, reg_buf);
2370 return;
2371 }
2372
2373 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2374 {
2375 /* pseudo H registers */
2376 unsigned v_regnum;
2377
2378 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2379 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2380 regcache->raw_write (v_regnum, reg_buf);
2381 return;
2382 }
2383
2384 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2385 {
2386 /* pseudo B registers */
2387 unsigned v_regnum;
2388
2389 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2390 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2391 regcache->raw_write (v_regnum, reg_buf);
2392 return;
2393 }
2394
 2395   gdb_assert_not_reached ("regnum out of bounds");
2396 }
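
/* A worked example of the zeroing behaviour above (a sketch; assumes a
   little-endian target): the command "set $s0 = 1.0f" reaches
   aarch64_pseudo_write for the S0 pseudo register.  The four bytes
   00 00 80 3f are copied into REG_BUF, the other twelve bytes remain
   zero, and the full 16-byte buffer is written to the raw V0 register,
   mirroring an architectural write to S0.  */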
2397
2398 /* Callback function for user_reg_add. */
2399
2400 static struct value *
2401 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2402 {
2403 const int *reg_p = (const int *) baton;
2404
2405 return value_of_register (*reg_p, frame);
2406 }
2407 \f
2408
2409 /* Implement the "software_single_step" gdbarch method, needed to
2410 single step through atomic sequences on AArch64. */
2411
2412 static std::vector<CORE_ADDR>
2413 aarch64_software_single_step (struct regcache *regcache)
2414 {
2415 struct gdbarch *gdbarch = regcache->arch ();
2416 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2417 const int insn_size = 4;
2418 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2419 CORE_ADDR pc = regcache_read_pc (regcache);
2420 CORE_ADDR breaks[2] = { -1, -1 };
2421 CORE_ADDR loc = pc;
2422 CORE_ADDR closing_insn = 0;
2423 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2424 byte_order_for_code);
2425 int index;
2426 int insn_count;
2427 int bc_insn_count = 0; /* Conditional branch instruction count. */
2428 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2429 aarch64_inst inst;
2430
2431 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2432 return {};
2433
2434 /* Look for a Load Exclusive instruction which begins the sequence. */
2435 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2436 return {};
2437
2438 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2439 {
2440 loc += insn_size;
2441 insn = read_memory_unsigned_integer (loc, insn_size,
2442 byte_order_for_code);
2443
2444 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2445 return {};
2446 /* Check if the instruction is a conditional branch. */
2447 if (inst.opcode->iclass == condbranch)
2448 {
2449 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2450
2451 if (bc_insn_count >= 1)
2452 return {};
2453
2454 /* It is, so we'll try to set a breakpoint at the destination. */
2455 breaks[1] = loc + inst.operands[0].imm.value;
2456
2457 bc_insn_count++;
2458 last_breakpoint++;
2459 }
2460
2461 /* Look for the Store Exclusive which closes the atomic sequence. */
2462 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2463 {
2464 closing_insn = loc;
2465 break;
2466 }
2467 }
2468
2469 /* We didn't find a closing Store Exclusive instruction, fall back. */
2470 if (!closing_insn)
2471 return {};
2472
2473 /* Insert breakpoint after the end of the atomic sequence. */
2474 breaks[0] = loc + insn_size;
2475
2476 /* Check for duplicated breakpoints, and also check that the second
2477 breakpoint is not within the atomic sequence. */
2478 if (last_breakpoint
2479 && (breaks[1] == breaks[0]
2480 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2481 last_breakpoint = 0;
2482
2483 std::vector<CORE_ADDR> next_pcs;
2484
2485 /* Insert the breakpoint at the end of the sequence, and one at the
2486 destination of the conditional branch, if it exists. */
2487 for (index = 0; index <= last_breakpoint; index++)
2488 next_pcs.push_back (breaks[index]);
2489
2490 return next_pcs;
2491 }
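
/* A sketch of the kind of sequence handled above (illustrative code,
   not taken from any particular program):

     retry:
       ldaxr  w2, [x0]        -- Load Exclusive opens the sequence.
       add    w2, w2, #1
       stlxr  w3, w2, [x0]    -- Store Exclusive closes it.
       cbnz   w3, retry

   The breakpoint goes on the instruction after the STLXR (here the
   CBNZ), with a second one at the destination of any conditional
   branch found inside the sequence.  Stopping between the LDAXR and
   the STLXR would clear the exclusive monitor, so the sequence could
   never succeed if stepped one instruction at a time.  */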
2492
2493 struct aarch64_displaced_step_closure : public displaced_step_closure
2494 {
 2495   /* True when a conditional instruction, such as B.COND or TBZ,
 2496      is being displaced stepped.  */
2497 int cond = 0;
2498
2499 /* PC adjustment offset after displaced stepping. */
2500 int32_t pc_adjust = 0;
2501 };
2502
2503 /* Data when visiting instructions for displaced stepping. */
2504
2505 struct aarch64_displaced_step_data
2506 {
2507 struct aarch64_insn_data base;
2508
 2509   /* The address at which the instruction will be executed.  */
2510 CORE_ADDR new_addr;
2511 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2512 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2513 /* Number of instructions in INSN_BUF. */
2514 unsigned insn_count;
2515 /* Registers when doing displaced stepping. */
2516 struct regcache *regs;
2517
2518 aarch64_displaced_step_closure *dsc;
2519 };
2520
2521 /* Implementation of aarch64_insn_visitor method "b". */
2522
2523 static void
2524 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2525 struct aarch64_insn_data *data)
2526 {
2527 struct aarch64_displaced_step_data *dsd
2528 = (struct aarch64_displaced_step_data *) data;
2529 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2530
2531 if (can_encode_int32 (new_offset, 28))
2532 {
2533 /* Emit B rather than BL, because executing BL on a new address
2534 will get the wrong address into LR. In order to avoid this,
2535 we emit B, and update LR if the instruction is BL. */
2536 emit_b (dsd->insn_buf, 0, new_offset);
2537 dsd->insn_count++;
2538 }
2539 else
2540 {
2541 /* Write NOP. */
2542 emit_nop (dsd->insn_buf);
2543 dsd->insn_count++;
2544 dsd->dsc->pc_adjust = offset;
2545 }
2546
2547 if (is_bl)
2548 {
2549 /* Update LR. */
2550 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2551 data->insn_addr + 4);
2552 }
2553 }
2554
2555 /* Implementation of aarch64_insn_visitor method "b_cond". */
2556
2557 static void
2558 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2559 struct aarch64_insn_data *data)
2560 {
2561 struct aarch64_displaced_step_data *dsd
2562 = (struct aarch64_displaced_step_data *) data;
2563
 2564   /* GDB has to fix up the PC after displaced stepping this instruction
 2565      differently according to whether the condition is true or false.
 2566      Instead of checking COND against the condition flags, we can emit
 2567      the following instructions, and then GDB can tell how to fix up
 2568      the PC from the resulting PC value.
2569
2570 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2571 INSN1 ;
2572 TAKEN:
2573 INSN2
2574 */
2575
2576 emit_bcond (dsd->insn_buf, cond, 8);
2577 dsd->dsc->cond = 1;
2578 dsd->dsc->pc_adjust = offset;
2579 dsd->insn_count = 1;
2580 }
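
/* A worked example of the resulting fixup (hypothetical addresses):
   for a "b.eq target" copied from FROM to scratch address TO, the
   scratch pad holds "b.eq .+8".  If the condition is false, the PC
   ends up at TO + 4 and aarch64_displaced_step_fixup sets it to
   FROM + 4; if the condition is true, the PC ends up at TO + 8 and
   the fixup sets it to FROM + PC_ADJUST, the original branch
   offset.  */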
2581
 2582 /* Build an aarch64_register operand for register NUM; IS64 selects the
 2583    64-bit view.  If the register is known statically, it should be made
 2584    a global, as above, instead of using this helper function.  */
2585
2586 static struct aarch64_register
2587 aarch64_register (unsigned num, int is64)
2588 {
2589 return (struct aarch64_register) { num, is64 };
2590 }
2591
2592 /* Implementation of aarch64_insn_visitor method "cb". */
2593
2594 static void
2595 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2596 const unsigned rn, int is64,
2597 struct aarch64_insn_data *data)
2598 {
2599 struct aarch64_displaced_step_data *dsd
2600 = (struct aarch64_displaced_step_data *) data;
2601
2602 /* The offset is out of range for a compare and branch
2603 instruction. We can use the following instructions instead:
2604
2605 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2606 INSN1 ;
2607 TAKEN:
2608 INSN2
2609 */
2610 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2611 dsd->insn_count = 1;
2612 dsd->dsc->cond = 1;
2613 dsd->dsc->pc_adjust = offset;
2614 }
2615
2616 /* Implementation of aarch64_insn_visitor method "tb". */
2617
2618 static void
2619 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2620 const unsigned rt, unsigned bit,
2621 struct aarch64_insn_data *data)
2622 {
2623 struct aarch64_displaced_step_data *dsd
2624 = (struct aarch64_displaced_step_data *) data;
2625
 2626   /* The offset is out of range for a test bit and branch
 2627      instruction.  We can use the following instructions instead:
2628
2629 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2630 INSN1 ;
2631 TAKEN:
2632 INSN2
2633
2634 */
2635 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2636 dsd->insn_count = 1;
2637 dsd->dsc->cond = 1;
2638 dsd->dsc->pc_adjust = offset;
2639 }
2640
2641 /* Implementation of aarch64_insn_visitor method "adr". */
2642
2643 static void
2644 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2645 const int is_adrp, struct aarch64_insn_data *data)
2646 {
2647 struct aarch64_displaced_step_data *dsd
2648 = (struct aarch64_displaced_step_data *) data;
2649 /* We know exactly the address the ADR{P,} instruction will compute.
2650 We can just write it to the destination register. */
2651 CORE_ADDR address = data->insn_addr + offset;
2652
2653 if (is_adrp)
2654 {
2655 /* Clear the lower 12 bits of the offset to get the 4K page. */
2656 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2657 address & ~0xfff);
2658 }
2659 else
2660 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2661 address);
2662
2663 dsd->dsc->pc_adjust = 4;
2664 emit_nop (dsd->insn_buf);
2665 dsd->insn_count = 1;
2666 }
2667
2668 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2669
2670 static void
2671 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2672 const unsigned rt, const int is64,
2673 struct aarch64_insn_data *data)
2674 {
2675 struct aarch64_displaced_step_data *dsd
2676 = (struct aarch64_displaced_step_data *) data;
2677 CORE_ADDR address = data->insn_addr + offset;
2678 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2679
2680 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2681 address);
2682
2683 if (is_sw)
2684 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2685 aarch64_register (rt, 1), zero);
2686 else
2687 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2688 aarch64_register (rt, 1), zero);
2689
2690 dsd->dsc->pc_adjust = 4;
2691 }
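
/* An illustrative transformation (hypothetical operands): for a
   PC-relative "ldr x0, label" copied from FROM, the literal's absolute
   address FROM + OFFSET is first written into X0, and the relocated
   instruction becomes "ldr x0, [x0, #0]", so the load no longer
   depends on where the scratch pad is placed.  */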
2692
2693 /* Implementation of aarch64_insn_visitor method "others". */
2694
2695 static void
2696 aarch64_displaced_step_others (const uint32_t insn,
2697 struct aarch64_insn_data *data)
2698 {
2699 struct aarch64_displaced_step_data *dsd
2700 = (struct aarch64_displaced_step_data *) data;
2701
2702 aarch64_emit_insn (dsd->insn_buf, insn);
2703 dsd->insn_count = 1;
2704
2705 if ((insn & 0xfffffc1f) == 0xd65f0000)
2706 {
2707 /* RET */
2708 dsd->dsc->pc_adjust = 0;
2709 }
2710 else
2711 dsd->dsc->pc_adjust = 4;
2712 }
2713
2714 static const struct aarch64_insn_visitor visitor =
2715 {
2716 aarch64_displaced_step_b,
2717 aarch64_displaced_step_b_cond,
2718 aarch64_displaced_step_cb,
2719 aarch64_displaced_step_tb,
2720 aarch64_displaced_step_adr,
2721 aarch64_displaced_step_ldr_literal,
2722 aarch64_displaced_step_others,
2723 };
2724
2725 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2726
2727 struct displaced_step_closure *
2728 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2729 CORE_ADDR from, CORE_ADDR to,
2730 struct regcache *regs)
2731 {
2732 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2733 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2734 struct aarch64_displaced_step_data dsd;
2735 aarch64_inst inst;
2736
2737 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2738 return NULL;
2739
2740 /* Look for a Load Exclusive instruction which begins the sequence. */
2741 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2742 {
2743 /* We can't displaced step atomic sequences. */
2744 return NULL;
2745 }
2746
2747 std::unique_ptr<aarch64_displaced_step_closure> dsc
2748 (new aarch64_displaced_step_closure);
2749 dsd.base.insn_addr = from;
2750 dsd.new_addr = to;
2751 dsd.regs = regs;
2752 dsd.dsc = dsc.get ();
2753 dsd.insn_count = 0;
2754 aarch64_relocate_instruction (insn, &visitor,
2755 (struct aarch64_insn_data *) &dsd);
2756 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2757
2758 if (dsd.insn_count != 0)
2759 {
2760 int i;
2761
 2762       /* The instruction can be relocated to the scratch pad.  Copy the
 2763 	 relocated instruction(s) there.  */
2764 for (i = 0; i < dsd.insn_count; i++)
2765 {
2766 if (debug_displaced)
2767 {
2768 debug_printf ("displaced: writing insn ");
2769 debug_printf ("%.8x", dsd.insn_buf[i]);
2770 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2771 }
2772 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2773 (ULONGEST) dsd.insn_buf[i]);
2774 }
2775 }
2776 else
2777 {
2778 dsc = NULL;
2779 }
2780
2781 return dsc.release ();
2782 }
2783
2784 /* Implement the "displaced_step_fixup" gdbarch method. */
2785
2786 void
2787 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2788 struct displaced_step_closure *dsc_,
2789 CORE_ADDR from, CORE_ADDR to,
2790 struct regcache *regs)
2791 {
2792 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2793
2794 if (dsc->cond)
2795 {
2796 ULONGEST pc;
2797
2798 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2799 if (pc - to == 8)
2800 {
2801 /* Condition is true. */
2802 }
2803 else if (pc - to == 4)
2804 {
2805 /* Condition is false. */
2806 dsc->pc_adjust = 4;
2807 }
2808 else
2809 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2810 }
2811
2812 if (dsc->pc_adjust != 0)
2813 {
2814 if (debug_displaced)
2815 {
2816 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2817 paddress (gdbarch, from), dsc->pc_adjust);
2818 }
2819 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2820 from + dsc->pc_adjust);
2821 }
2822 }
2823
2824 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2825
2826 int
2827 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2828 struct displaced_step_closure *closure)
2829 {
2830 return 1;
2831 }
2832
2833 /* Get the correct target description for the given VQ value.
2834 If VQ is zero then it is assumed SVE is not supported.
2835 (It is not possible to set VQ to zero on an SVE system). */
2836
2837 const target_desc *
2838 aarch64_read_description (long vq)
2839 {
2840 if (vq > AARCH64_MAX_SVE_VQ)
2841 error (_("VQ is %ld, maximum supported value is %d"), vq,
2842 AARCH64_MAX_SVE_VQ);
2843
2844 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2845
2846 if (tdesc == NULL)
2847 {
2848 tdesc = aarch64_create_target_description (vq);
2849 tdesc_aarch64_list[vq] = tdesc;
2850 }
2851
2852 return tdesc;
2853 }
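
/* For example, a target with 256-bit SVE vectors reports VQ = 2 (the
   vector length in units of 128 bits), so aarch64_read_description (2)
   returns, and caches, a description with 256-bit Z registers, while
   aarch64_read_description (0) yields the plain non-SVE
   description.  */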
2854
2855 /* Initialize the current architecture based on INFO. If possible,
2856 re-use an architecture from ARCHES, which is a list of
2857 architectures already created during this debugging session.
2858
2859 Called e.g. at program startup, when reading a core file, and when
2860 reading a binary file. */
2861
2862 static struct gdbarch *
2863 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2864 {
2865 struct gdbarch_tdep *tdep;
2866 struct gdbarch *gdbarch;
2867 struct gdbarch_list *best_arch;
2868 struct tdesc_arch_data *tdesc_data = NULL;
2869 const struct target_desc *tdesc = info.target_desc;
2870 int i;
2871 int valid_p = 1;
2872 const struct tdesc_feature *feature;
2873 int num_regs = 0;
2874 int num_pseudo_regs = 0;
2875
 2876   /* Ensure we always have a target description.  */
2877 if (!tdesc_has_registers (tdesc))
2878 {
2879 /* SVE is not yet supported. */
2880 tdesc = aarch64_read_description (0);
2881 }
2882
2883 gdb_assert (tdesc);
2884
2885 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2886
2887 if (feature == NULL)
2888 return NULL;
2889
2890 tdesc_data = tdesc_data_alloc ();
2891
 2892   /* Validate that the description provides the mandatory core R
 2893      registers and allocate their numbers.  */
2894 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2895 valid_p &=
2896 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2897 aarch64_r_register_names[i]);
2898
2899 num_regs = AARCH64_X0_REGNUM + i;
2900
2901 /* Look for the V registers. */
2902 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2903 if (feature)
2904 {
 2905       /* Validate that the description provides the mandatory V registers
 2906 	 and allocate their numbers.  */
2907 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2908 valid_p &=
2909 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2910 aarch64_v_register_names[i]);
2911
2912 num_regs = AARCH64_V0_REGNUM + i;
2913
2914 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2915 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2916 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2917 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2918 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2919 }
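
/* For reference, a minimal (hypothetical) XML fragment satisfying the
   core-feature check above would look like:

     <feature name="org.gnu.gdb.aarch64.core">
       <reg name="x0" bitsize="64"/>
       ...
       <reg name="x30" bitsize="64"/>
       <reg name="sp" bitsize="64" type="data_ptr"/>
       <reg name="pc" bitsize="64" type="code_ptr"/>
       <reg name="cpsr" bitsize="32"/>
     </feature>

   with an analogous "org.gnu.gdb.aarch64.fpu" feature providing v0-v31
   plus fpsr and fpcr.  */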
2920
2921 if (!valid_p)
2922 {
2923 tdesc_data_cleanup (tdesc_data);
2924 return NULL;
2925 }
2926
2927 /* AArch64 code is always little-endian. */
2928 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2929
2930 /* If there is already a candidate, use it. */
2931 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2932 best_arch != NULL;
2933 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2934 {
2935 /* Found a match. */
2936 break;
2937 }
2938
2939 if (best_arch != NULL)
2940 {
2941 if (tdesc_data != NULL)
2942 tdesc_data_cleanup (tdesc_data);
2943 return best_arch->gdbarch;
2944 }
2945
2946 tdep = XCNEW (struct gdbarch_tdep);
2947 gdbarch = gdbarch_alloc (&info, tdep);
2948
2949 /* This should be low enough for everything. */
2950 tdep->lowest_pc = 0x20;
2951 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2952 tdep->jb_elt_size = 8;
2953
2954 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2955 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2956
2957 /* Frame handling. */
2958 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2959 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2960 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2961
2962 /* Advance PC across function entry code. */
2963 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2964
2965 /* The stack grows downward. */
2966 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2967
2968 /* Breakpoint manipulation. */
2969 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2970 aarch64_breakpoint::kind_from_pc);
2971 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2972 aarch64_breakpoint::bp_from_kind);
2973 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2974 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2975
2976 /* Information about registers, etc. */
2977 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2978 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2979 set_gdbarch_num_regs (gdbarch, num_regs);
2980
2981 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2982 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2983 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2984 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2985 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2986 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2987 aarch64_pseudo_register_reggroup_p);
2988
2989 /* ABI */
2990 set_gdbarch_short_bit (gdbarch, 16);
2991 set_gdbarch_int_bit (gdbarch, 32);
2992 set_gdbarch_float_bit (gdbarch, 32);
2993 set_gdbarch_double_bit (gdbarch, 64);
2994 set_gdbarch_long_double_bit (gdbarch, 128);
2995 set_gdbarch_long_bit (gdbarch, 64);
2996 set_gdbarch_long_long_bit (gdbarch, 64);
2997 set_gdbarch_ptr_bit (gdbarch, 64);
2998 set_gdbarch_char_signed (gdbarch, 0);
2999 set_gdbarch_wchar_signed (gdbarch, 0);
3000 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3001 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3002 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3003
3004 /* Internal <-> external register number maps. */
3005 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3006
3007 /* Returning results. */
3008 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3009
3010 /* Disassembly. */
3011 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3012
3013 /* Virtual tables. */
3014 set_gdbarch_vbit_in_delta (gdbarch, 1);
3015
3016 /* Hook in the ABI-specific overrides, if they have been registered. */
3017 info.target_desc = tdesc;
3018 info.tdesc_data = tdesc_data;
3019 gdbarch_init_osabi (info, gdbarch);
3020
3021 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3022
3023 /* Add some default predicates. */
3024 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3025 dwarf2_append_unwinders (gdbarch);
3026 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3027
3028 frame_base_set_default (gdbarch, &aarch64_normal_base);
3029
 3030   /* Now that we have tuned the configuration, set a few final things
 3031      based on what the OS ABI has told us.  */
3032
3033 if (tdep->jb_pc >= 0)
3034 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3035
3036 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3037
3038 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3039
3040 /* Add standard register aliases. */
3041 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3042 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3043 value_of_aarch64_user_reg,
3044 &aarch64_register_aliases[i].regnum);
3045
3046 return gdbarch;
3047 }
3048
3049 static void
3050 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3051 {
3052 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3053
3054 if (tdep == NULL)
3055 return;
3056
3057 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3058 paddress (gdbarch, tdep->lowest_pc));
3059 }
3060
3061 #if GDB_SELF_TEST
3062 namespace selftests
3063 {
3064 static void aarch64_process_record_test (void);
3065 }
3066 #endif
3067
3068 void
3069 _initialize_aarch64_tdep (void)
3070 {
3071 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3072 aarch64_dump_tdep);
3073
3074 /* Debug this file's internals. */
3075 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3076 Set AArch64 debugging."), _("\
3077 Show AArch64 debugging."), _("\
3078 When on, AArch64 specific debugging is enabled."),
3079 NULL,
3080 show_aarch64_debug,
3081 &setdebuglist, &showdebuglist);
3082
3083 #if GDB_SELF_TEST
3084 selftests::register_test ("aarch64-analyze-prologue",
3085 selftests::aarch64_analyze_prologue_test);
3086 selftests::register_test ("aarch64-process-record",
3087 selftests::aarch64_process_record_test);
3088 selftests::record_xml_tdesc ("aarch64.xml",
3089 aarch64_create_target_description (0));
3090 #endif
3091 }
3092
3093 /* AArch64 process record-replay related structures, defines etc. */
3094
3095 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3096 do \
3097 { \
3098 unsigned int reg_len = LENGTH; \
3099 if (reg_len) \
3100 { \
3101 REGS = XNEWVEC (uint32_t, reg_len); \
 3102         memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3103 } \
3104 } \
3105 while (0)
3106
3107 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3108 do \
3109 { \
3110 unsigned int mem_len = LENGTH; \
3111 if (mem_len) \
3112 { \
3113 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
 3114         memcpy (&MEMS->len, &RECORD_BUF[0], \
 3115 		sizeof (struct aarch64_mem_r) * LENGTH); \
3116 } \
3117 } \
3118 while (0)
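
/* A typical use of these macros in the record handlers below (sketch
   only): a handler collects the numbers of the registers an
   instruction modifies into a local buffer and then hands it over:

     uint32_t record_buf[2];

     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
		aarch64_insn_r->reg_rec_count, record_buf);

   MEM_ALLOC works the same way with (length, address) pairs packed
   into a uint64_t array.  */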
3119
3120 /* AArch64 record/replay structures and enumerations. */
3121
3122 struct aarch64_mem_r
3123 {
3124 uint64_t len; /* Record length. */
3125 uint64_t addr; /* Memory address. */
3126 };
3127
3128 enum aarch64_record_result
3129 {
3130 AARCH64_RECORD_SUCCESS,
3131 AARCH64_RECORD_UNSUPPORTED,
3132 AARCH64_RECORD_UNKNOWN
3133 };
3134
3135 typedef struct insn_decode_record_t
3136 {
3137 struct gdbarch *gdbarch;
3138 struct regcache *regcache;
3139 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3140 uint32_t aarch64_insn; /* Insn to be recorded. */
3141 uint32_t mem_rec_count; /* Count of memory records. */
3142 uint32_t reg_rec_count; /* Count of register records. */
3143 uint32_t *aarch64_regs; /* Registers to be recorded. */
3144 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3145 } insn_decode_record;
3146
3147 /* Record handler for data processing - register instructions. */
3148
3149 static unsigned int
3150 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3151 {
3152 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3153 uint32_t record_buf[4];
3154
3155 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3156 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3157 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3158
3159 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3160 {
3161 uint8_t setflags;
3162
3163 /* Logical (shifted register). */
3164 if (insn_bits24_27 == 0x0a)
3165 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3166 /* Add/subtract. */
3167 else if (insn_bits24_27 == 0x0b)
3168 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3169 else
3170 return AARCH64_RECORD_UNKNOWN;
3171
3172 record_buf[0] = reg_rd;
3173 aarch64_insn_r->reg_rec_count = 1;
3174 if (setflags)
3175 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3176 }
3177 else
3178 {
3179 if (insn_bits24_27 == 0x0b)
3180 {
3181 /* Data-processing (3 source). */
3182 record_buf[0] = reg_rd;
3183 aarch64_insn_r->reg_rec_count = 1;
3184 }
3185 else if (insn_bits24_27 == 0x0a)
3186 {
3187 if (insn_bits21_23 == 0x00)
3188 {
3189 /* Add/subtract (with carry). */
3190 record_buf[0] = reg_rd;
3191 aarch64_insn_r->reg_rec_count = 1;
3192 if (bit (aarch64_insn_r->aarch64_insn, 29))
3193 {
3194 record_buf[1] = AARCH64_CPSR_REGNUM;
3195 aarch64_insn_r->reg_rec_count = 2;
3196 }
3197 }
3198 else if (insn_bits21_23 == 0x02)
3199 {
3200 /* Conditional compare (register) and conditional compare
3201 (immediate) instructions. */
3202 record_buf[0] = AARCH64_CPSR_REGNUM;
3203 aarch64_insn_r->reg_rec_count = 1;
3204 }
3205 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3206 {
 3207 	      /* Conditional select.  */
3208 /* Data-processing (2 source). */
3209 /* Data-processing (1 source). */
3210 record_buf[0] = reg_rd;
3211 aarch64_insn_r->reg_rec_count = 1;
3212 }
3213 else
3214 return AARCH64_RECORD_UNKNOWN;
3215 }
3216 }
3217
3218 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3219 record_buf);
3220 return AARCH64_RECORD_SUCCESS;
3221 }
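
/* For instance (hypothetical encodings): "add x0, x1, x2" records only
   X0, whereas the flag-setting form "adds x0, x1, x2" records both X0
   and CPSR, since executing it clobbers the condition flags too.  */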
3222
3223 /* Record handler for data processing - immediate instructions. */
3224
3225 static unsigned int
3226 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3227 {
3228 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3229 uint32_t record_buf[4];
3230
3231 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3232 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3233 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3234
3235 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3236 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3237 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3238 {
3239 record_buf[0] = reg_rd;
3240 aarch64_insn_r->reg_rec_count = 1;
3241 }
3242 else if (insn_bits24_27 == 0x01)
3243 {
3244 /* Add/Subtract (immediate). */
3245 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3246 record_buf[0] = reg_rd;
3247 aarch64_insn_r->reg_rec_count = 1;
3248 if (setflags)
3249 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3250 }
3251 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3252 {
3253 /* Logical (immediate). */
3254 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3255 record_buf[0] = reg_rd;
3256 aarch64_insn_r->reg_rec_count = 1;
3257 if (setflags)
3258 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3259 }
3260 else
3261 return AARCH64_RECORD_UNKNOWN;
3262
3263 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3264 record_buf);
3265 return AARCH64_RECORD_SUCCESS;
3266 }
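
/* Likewise for the immediate forms (hypothetical examples):
   "movz x5, #1" records just X5, while the flag-setting
   "subs x5, x5, #1" also records CPSR.  */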
3267
3268 /* Record handler for branch, exception generation and system instructions. */
3269
3270 static unsigned int
3271 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3272 {
3273 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3274 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3275 uint32_t record_buf[4];
3276
3277 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3278 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3279 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3280
3281 if (insn_bits28_31 == 0x0d)
3282 {
3283 /* Exception generation instructions. */
3284 if (insn_bits24_27 == 0x04)
3285 {
3286 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3287 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3288 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3289 {
3290 ULONGEST svc_number;
3291
3292 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3293 &svc_number);
3294 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3295 svc_number);
3296 }
3297 else
3298 return AARCH64_RECORD_UNSUPPORTED;
3299 }
3300 /* System instructions. */
3301 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3302 {
3303 uint32_t reg_rt, reg_crn;
3304
3305 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3306 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3307
 3308 	  /* Record Rt for SYSL and MRS instructions.  */
3309 if (bit (aarch64_insn_r->aarch64_insn, 21))
3310 {
3311 record_buf[0] = reg_rt;
3312 aarch64_insn_r->reg_rec_count = 1;
3313 }
 3314 	  /* Record CPSR for HINT and MSR (immediate) instructions.  */
3315 else if (reg_crn == 0x02 || reg_crn == 0x04)
3316 {
3317 record_buf[0] = AARCH64_CPSR_REGNUM;
3318 aarch64_insn_r->reg_rec_count = 1;
3319 }
3320 }
3321 /* Unconditional branch (register). */
 3322       else if ((insn_bits24_27 & 0x0e) == 0x06)
3323 {
3324 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3325 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3326 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3327 }
3328 else
3329 return AARCH64_RECORD_UNKNOWN;
3330 }
3331 /* Unconditional branch (immediate). */
3332 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3333 {
3334 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3335 if (bit (aarch64_insn_r->aarch64_insn, 31))
3336 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3337 }
3338 else
3339 /* Compare & branch (immediate), Test & branch (immediate) and
3340 Conditional branch (immediate). */
3341 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3342
3343 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3344 record_buf);
3345 return AARCH64_RECORD_SUCCESS;
3346 }
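
/* Concretely (hypothetical instructions): "b.ne label" records only
   the PC, "bl func" records the PC and LR, and "svc #0" defers to the
   OS-specific aarch64_syscall_record hook, since a system call may
   modify registers and memory beyond the PC.  */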
3347
3348 /* Record handler for advanced SIMD load and store instructions. */
3349
3350 static unsigned int
3351 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3352 {
3353 CORE_ADDR address;
3354 uint64_t addr_offset = 0;
3355 uint32_t record_buf[24];
3356 uint64_t record_buf_mem[24];
3357 uint32_t reg_rn, reg_rt;
3358 uint32_t reg_index = 0, mem_index = 0;
3359 uint8_t opcode_bits, size_bits;
3360
3361 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3362 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3363 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3364 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3365 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3366
3367 if (record_debug)
3368 debug_printf ("Process record: Advanced SIMD load/store\n");
3369
3370 /* Load/store single structure. */
3371 if (bit (aarch64_insn_r->aarch64_insn, 24))
3372 {
3373 uint8_t sindex, scale, selem, esize, replicate = 0;
3374 scale = opcode_bits >> 2;
3375 selem = ((opcode_bits & 0x02) |
3376 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3377 switch (scale)
3378 {
3379 case 1:
3380 if (size_bits & 0x01)
3381 return AARCH64_RECORD_UNKNOWN;
3382 break;
3383 case 2:
3384 if ((size_bits >> 1) & 0x01)
3385 return AARCH64_RECORD_UNKNOWN;
3386 if (size_bits & 0x01)
3387 {
3388 if (!((opcode_bits >> 1) & 0x01))
3389 scale = 3;
3390 else
3391 return AARCH64_RECORD_UNKNOWN;
3392 }
3393 break;
3394 case 3:
3395 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3396 {
3397 scale = size_bits;
3398 replicate = 1;
3399 break;
3400 }
3401 else
3402 return AARCH64_RECORD_UNKNOWN;
3403 default:
3404 break;
3405 }
3406 esize = 8 << scale;
3407 if (replicate)
3408 for (sindex = 0; sindex < selem; sindex++)
3409 {
3410 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3411 reg_rt = (reg_rt + 1) % 32;
3412 }
3413 else
3414 {
3415 for (sindex = 0; sindex < selem; sindex++)
3416 {
3417 if (bit (aarch64_insn_r->aarch64_insn, 22))
3418 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3419 else
3420 {
3421 record_buf_mem[mem_index++] = esize / 8;
3422 record_buf_mem[mem_index++] = address + addr_offset;
3423 }
3424 addr_offset = addr_offset + (esize / 8);
3425 reg_rt = (reg_rt + 1) % 32;
3426 }
3427 }
3428 }
3429 /* Load/store multiple structure. */
3430 else
3431 {
3432 uint8_t selem, esize, rpt, elements;
3433 uint8_t eindex, rindex;
3434
3435 esize = 8 << size_bits;
3436 if (bit (aarch64_insn_r->aarch64_insn, 30))
3437 elements = 128 / esize;
3438 else
3439 elements = 64 / esize;
3440
3441 switch (opcode_bits)
3442 {
 3443 	  /* LD/ST4 (4 Registers).  */
 3444 	case 0:
 3445 	  rpt = 1;
 3446 	  selem = 4;
 3447 	  break;
 3448 	  /* LD/ST1 (4 Registers).  */
 3449 	case 2:
 3450 	  rpt = 4;
 3451 	  selem = 1;
 3452 	  break;
 3453 	  /* LD/ST3 (3 Registers).  */
 3454 	case 4:
 3455 	  rpt = 1;
 3456 	  selem = 3;
 3457 	  break;
 3458 	  /* LD/ST1 (3 Registers).  */
 3459 	case 6:
 3460 	  rpt = 3;
 3461 	  selem = 1;
 3462 	  break;
 3463 	  /* LD/ST1 (1 Register).  */
 3464 	case 7:
 3465 	  rpt = 1;
 3466 	  selem = 1;
 3467 	  break;
 3468 	  /* LD/ST2 (2 Registers).  */
 3469 	case 8:
 3470 	  rpt = 1;
 3471 	  selem = 2;
 3472 	  break;
 3473 	  /* LD/ST1 (2 Registers).  */
3474 case 10:
3475 rpt = 2;
3476 selem = 1;
3477 break;
3478 default:
3479 return AARCH64_RECORD_UNSUPPORTED;
3480 break;
3481 }
3482 for (rindex = 0; rindex < rpt; rindex++)
3483 for (eindex = 0; eindex < elements; eindex++)
3484 {
3485 uint8_t reg_tt, sindex;
3486 reg_tt = (reg_rt + rindex) % 32;
3487 for (sindex = 0; sindex < selem; sindex++)
3488 {
3489 if (bit (aarch64_insn_r->aarch64_insn, 22))
3490 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3491 else
3492 {
3493 record_buf_mem[mem_index++] = esize / 8;
3494 record_buf_mem[mem_index++] = address + addr_offset;
3495 }
3496 addr_offset = addr_offset + (esize / 8);
3497 reg_tt = (reg_tt + 1) % 32;
3498 }
3499 }
3500 }
3501
3502 if (bit (aarch64_insn_r->aarch64_insn, 23))
3503 record_buf[reg_index++] = reg_rn;
3504
3505 aarch64_insn_r->reg_rec_count = reg_index;
3506 aarch64_insn_r->mem_rec_count = mem_index / 2;
3507 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3508 record_buf_mem);
3509 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3510 record_buf);
3511 return AARCH64_RECORD_SUCCESS;
3512 }
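
/* A rough example (not exhaustive): "ld1 {v0.16b}, [x0]" records the
   V0 register, while the store form "st1 {v0.16b}, [x0]" instead
   records a 16-byte memory range starting at the address in X0; the
   post-index variants additionally record the base register for
   writeback.  */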
3513
3514 /* Record handler for load and store instructions. */
3515
3516 static unsigned int
3517 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3518 {
3519 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3520 uint8_t insn_bit23, insn_bit21;
3521 uint8_t opc, size_bits, ld_flag, vector_flag;
3522 uint32_t reg_rn, reg_rt, reg_rt2;
3523 uint64_t datasize, offset;
3524 uint32_t record_buf[8];
3525 uint64_t record_buf_mem[8];
3526 CORE_ADDR address;
3527
3528 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3529 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3530 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3531 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3532 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3533 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3534 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3535 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3536 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3537 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3538 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3539
3540 /* Load/store exclusive. */
3541 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3542 {
3543 if (record_debug)
3544 debug_printf ("Process record: load/store exclusive\n");
3545
3546 if (ld_flag)
3547 {
3548 record_buf[0] = reg_rt;
3549 aarch64_insn_r->reg_rec_count = 1;
3550 if (insn_bit21)
3551 {
3552 record_buf[1] = reg_rt2;
3553 aarch64_insn_r->reg_rec_count = 2;
3554 }
3555 }
3556 else
3557 {
3558 if (insn_bit21)
3559 datasize = (8 << size_bits) * 2;
3560 else
3561 datasize = (8 << size_bits);
3562 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3563 &address);
3564 record_buf_mem[0] = datasize / 8;
3565 record_buf_mem[1] = address;
3566 aarch64_insn_r->mem_rec_count = 1;
3567 if (!insn_bit23)
3568 {
 3569 	      /* Save register Rs.  */
3570 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3571 aarch64_insn_r->reg_rec_count = 1;
3572 }
3573 }
3574 }
3575 /* Load register (literal) instructions decoding. */
3576 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3577 {
3578 if (record_debug)
3579 debug_printf ("Process record: load register (literal)\n");
3580 if (vector_flag)
3581 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3582 else
3583 record_buf[0] = reg_rt;
3584 aarch64_insn_r->reg_rec_count = 1;
3585 }
3586 /* All types of load/store pair instructions decoding. */
3587 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3588 {
3589 if (record_debug)
3590 debug_printf ("Process record: load/store pair\n");
3591
3592 if (ld_flag)
3593 {
3594 if (vector_flag)
3595 {
3596 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3597 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3598 }
3599 else
3600 {
3601 record_buf[0] = reg_rt;
3602 record_buf[1] = reg_rt2;
3603 }
3604 aarch64_insn_r->reg_rec_count = 2;
3605 }
3606 else
3607 {
3608 uint16_t imm7_off;
3609 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3610 if (!vector_flag)
3611 size_bits = size_bits >> 1;
3612 datasize = 8 << (2 + size_bits);
3613 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3614 offset = offset << (2 + size_bits);
3615 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3616 &address);
3617 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3618 {
3619 if (imm7_off & 0x40)
3620 address = address - offset;
3621 else
3622 address = address + offset;
3623 }
3624
3625 record_buf_mem[0] = datasize / 8;
3626 record_buf_mem[1] = address;
3627 record_buf_mem[2] = datasize / 8;
3628 record_buf_mem[3] = address + (datasize / 8);
3629 aarch64_insn_r->mem_rec_count = 2;
3630 }
3631 if (bit (aarch64_insn_r->aarch64_insn, 23))
3632 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3633 }
3634 /* Load/store register (unsigned immediate) instructions. */
3635 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3636 {
3637 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3638 if (!(opc >> 1))
3639 {
3640 if (opc & 0x01)
3641 ld_flag = 0x01;
3642 else
3643 ld_flag = 0x0;
3644 }
3645 else
3646 {
3647 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3648 {
3649 /* PRFM (immediate) */
3650 return AARCH64_RECORD_SUCCESS;
3651 }
3652 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3653 {
3654 /* LDRSW (immediate) */
3655 ld_flag = 0x1;
3656 }
3657 else
3658 {
3659 if (opc & 0x01)
3660 ld_flag = 0x01;
3661 else
3662 ld_flag = 0x0;
3663 }
3664 }
3665
3666 if (record_debug)
3667 {
3668 debug_printf ("Process record: load/store (unsigned immediate):"
3669 " size %x V %d opc %x\n", size_bits, vector_flag,
3670 opc);
3671 }
3672
3673 if (!ld_flag)
3674 {
3675 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3676 datasize = 8 << size_bits;
3677 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3678 &address);
3679 offset = offset << size_bits;
3680 address = address + offset;
3681
3682 record_buf_mem[0] = datasize >> 3;
3683 record_buf_mem[1] = address;
3684 aarch64_insn_r->mem_rec_count = 1;
3685 }
3686 else
3687 {
3688 if (vector_flag)
3689 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3690 else
3691 record_buf[0] = reg_rt;
3692 aarch64_insn_r->reg_rec_count = 1;
3693 }
3694 }
3695 /* Load/store register (register offset) instructions. */
3696 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3697 && insn_bits10_11 == 0x02 && insn_bit21)
3698 {
3699 if (record_debug)
3700 debug_printf ("Process record: load/store (register offset)\n");
3701 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3702 if (!(opc >> 1))
3703 if (opc & 0x01)
3704 ld_flag = 0x01;
3705 else
3706 ld_flag = 0x0;
3707 else
3708 if (size_bits != 0x03)
3709 ld_flag = 0x01;
3710 else
3711 return AARCH64_RECORD_UNKNOWN;
3712
3713 if (!ld_flag)
3714 {
3715 ULONGEST reg_rm_val;
3716
3717 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3718 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3719 if (bit (aarch64_insn_r->aarch64_insn, 12))
3720 offset = reg_rm_val << size_bits;
3721 else
3722 offset = reg_rm_val;
3723 datasize = 8 << size_bits;
3724 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3725 &address);
3726 address = address + offset;
3727 record_buf_mem[0] = datasize >> 3;
3728 record_buf_mem[1] = address;
3729 aarch64_insn_r->mem_rec_count = 1;
3730 }
3731 else
3732 {
3733 if (vector_flag)
3734 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3735 else
3736 record_buf[0] = reg_rt;
3737 aarch64_insn_r->reg_rec_count = 1;
3738 }
3739 }
3740 /* Load/store register (immediate and unprivileged) instructions. */
3741 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3742 && !insn_bit21)
3743 {
3744 if (record_debug)
3745 {
3746 debug_printf ("Process record: load/store "
3747 "(immediate and unprivileged)\n");
3748 }
3749 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3750 if (!(opc >> 1))
3751 if (opc & 0x01)
3752 ld_flag = 0x01;
3753 else
3754 ld_flag = 0x0;
3755 else
3756 if (size_bits != 0x03)
3757 ld_flag = 0x01;
3758 else
3759 return AARCH64_RECORD_UNKNOWN;
3760
3761 if (!ld_flag)
3762 {
3763 uint16_t imm9_off;
3764 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3765 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3766 datasize = 8 << size_bits;
3767 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3768 &address);
3769 if (insn_bits10_11 != 0x01)
3770 {
3771 if (imm9_off & 0x0100)
3772 address = address - offset;
3773 else
3774 address = address + offset;
3775 }
3776 record_buf_mem[0] = datasize >> 3;
3777 record_buf_mem[1] = address;
3778 aarch64_insn_r->mem_rec_count = 1;
3779 }
3780 else
3781 {
3782 if (vector_flag)
3783 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3784 else
3785 record_buf[0] = reg_rt;
3786 aarch64_insn_r->reg_rec_count = 1;
3787 }
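/* Pre- and post-indexed forms (bits 10-11 equal to 0x1 or 0x3) write
   the updated address back into the base register, so record Rn as
   well.  */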
3788 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3789 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3790 }
3791 /* Advanced SIMD load/store instructions. */
3792 else
3793 return aarch64_record_asimd_load_store (aarch64_insn_r);
3794
3795 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3796 record_buf_mem);
3797 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3798 record_buf);
3799 return AARCH64_RECORD_SUCCESS;
3800 }
3801
3802 /* Record handler for data processing SIMD and floating point instructions. */
3803
3804 static unsigned int
3805 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3806 {
3807 uint8_t insn_bit21, opcode, rmode, reg_rd;
3808 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3809 uint8_t insn_bits11_14;
3810 uint32_t record_buf[2];
3811
3812 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3813 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3814 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3815 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3816 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3817 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3818 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3819 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3820 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3821
3822 if (record_debug)
3823 debug_printf ("Process record: data processing SIMD/FP: ");
3824
3825 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3826 {
3827 /* Floating point - fixed point conversion instructions. */
3828 if (!insn_bit21)
3829 {
3830 if (record_debug)
3831 debug_printf ("FP - fixed point conversion");
3832
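/* opcode 0x0/0x1 with rmode 0x3 encode FCVTZS/FCVTZU, which write a
   general register; the remaining forms (SCVTF/UCVTF) write a SIMD&FP
   register.  */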
3833 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3834 record_buf[0] = reg_rd;
3835 else
3836 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3837 }
3838 /* Floating point - conditional compare instructions. */
3839 else if (insn_bits10_11 == 0x01)
3840 {
3841 if (record_debug)
3842 debug_printf ("FP - conditional compare");
3843
3844 record_buf[0] = AARCH64_CPSR_REGNUM;
3845 }
3846 /* Floating point - data processing (2-source) and
3847 conditional select instructions. */
3848 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3849 {
3850 if (record_debug)
3851 debug_printf ("FP - DP (2-source)");
3852
3853 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3854 }
3855 else if (insn_bits10_11 == 0x00)
3856 {
3857 /* Floating point - immediate instructions. */
3858 if ((insn_bits12_15 & 0x01) == 0x01
3859 || (insn_bits12_15 & 0x07) == 0x04)
3860 {
3861 if (record_debug)
3862 debug_printf ("FP - immediate");
3863 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3864 }
3865 /* Floating point - compare instructions. */
3866 else if ((insn_bits12_15 & 0x03) == 0x02)
3867 {
3868 if (record_debug)
3869 debug_printf ("FP - compare");
3870 record_buf[0] = AARCH64_CPSR_REGNUM;
3871 }
3872 /* Floating point - integer conversions instructions. */
3873 else if (insn_bits12_15 == 0x00)
3874 {
3875 /* Convert float to integer instruction. */
3876 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3877 {
3878 if (record_debug)
3879 debug_printf ("float to int conversion");
3880
3881 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3882 }
3883 /* Convert integer to float instruction. */
3884 else if ((opcode >> 1) == 0x01 && !rmode)
3885 {
3886 if (record_debug)
3887 debug_printf ("int to float conversion");
3888
3889 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3890 }
3891 /* Move float to integer instruction. */
3892 else if ((opcode >> 1) == 0x03)
3893 {
3894 if (record_debug)
3895 debug_printf ("move float to int");
3896
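/* Bit 0 of opcode selects the direction: 0 moves the SIMD&FP register
   into a general register, 1 moves a general register into the SIMD&FP
   register.  */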
3897 if (!(opcode & 0x01))
3898 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3899 else
3900 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3901 }
3902 else
3903 return AARCH64_RECORD_UNKNOWN;
3904 }
3905 else
3906 return AARCH64_RECORD_UNKNOWN;
3907 }
3908 else
3909 return AARCH64_RECORD_UNKNOWN;
3910 }
3911 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3912 {
3913 if (record_debug)
3914 debug_printf ("SIMD copy");
3915
3916 /* Advanced SIMD copy instructions. */
3917 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3918 && !bit (aarch64_insn_r->aarch64_insn, 15)
3919 && bit (aarch64_insn_r->aarch64_insn, 10))
3920 {
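/* imm4 (bits 11-14) values 0x5 (SMOV) and 0x7 (UMOV) copy a vector
   element into a general register; the other copy forms write a vector
   register.  */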
3921 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3922 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3923 else
3924 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3925 }
3926 else
3927 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3928 }
3929 /* All remaining floating point or advanced SIMD instructions. */
3930 else
3931 {
3932 if (record_debug)
3933 debug_printf ("all remain");
3934
3935 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3936 }
3937
3938 if (record_debug)
3939 debug_printf ("\n");
3940
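/* Every branch above records exactly one destination register.  */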
3941 aarch64_insn_r->reg_rec_count++;
3942 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3943 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3944 record_buf);
3945 return AARCH64_RECORD_SUCCESS;
3946 }
3947
3948 /* Decode the instruction's type and invoke the matching record handler.  */
3949
3950 static unsigned int
3951 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3952 {
3953 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3954
3955 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3956 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3957 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3958 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3959
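/* Bits 25-28 select the top-level encoding group (op0 in the Arm ARM's
   A64 instruction index); each test below matches one group.  */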
3960 /* Data processing - immediate instructions. */
3961 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3962 return aarch64_record_data_proc_imm (aarch64_insn_r);
3963
3964 /* Branch, exception generation and system instructions. */
3965 if (ins_bit26 && !ins_bit27 && ins_bit28)
3966 return aarch64_record_branch_except_sys (aarch64_insn_r);
3967
3968 /* Load and store instructions. */
3969 if (!ins_bit25 && ins_bit27)
3970 return aarch64_record_load_store (aarch64_insn_r);
3971
3972 /* Data processing - register instructions. */
3973 if (ins_bit25 && !ins_bit26 && ins_bit27)
3974 return aarch64_record_data_proc_reg (aarch64_insn_r);
3975
3976 /* Data processing - SIMD and floating point instructions. */
3977 if (ins_bit25 && ins_bit26 && ins_bit27)
3978 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3979
3980 return AARCH64_RECORD_UNSUPPORTED;
3981 }
3982
3983 /* Free the register and memory record buffers allocated for RECORD.  */
3984
3985 static void
3986 deallocate_reg_mem (insn_decode_record *record)
3987 {
3988 xfree (record->aarch64_regs);
3989 xfree (record->aarch64_mems);
3990 }
3991
3992 #if GDB_SELF_TEST
3993 namespace selftests {
3994
3995 static void
3996 aarch64_process_record_test (void)
3997 {
3998 struct gdbarch_info info;
3999 uint32_t ret;
4000
4001 gdbarch_info_init (&info);
4002 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4003
4004 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4005 SELF_CHECK (gdbarch != NULL);
4006
4007 insn_decode_record aarch64_record;
4008
4009 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4010 aarch64_record.regcache = NULL;
4011 aarch64_record.this_addr = 0;
4012 aarch64_record.gdbarch = gdbarch;
4013
4014 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4015 aarch64_record.aarch64_insn = 0xf9800020;
4016 ret = aarch64_record_decode_insn_handler (&aarch64_record);
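/* PRFM is only a hint: decoding must succeed, yet no register or
   memory side effects should have been recorded.  */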
4017 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4018 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4019 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4020
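/* A minimal sketch of a further check one could add here, assuming the
   encoding 0xf9000020 (20 00 00 f9, str x0, [x1]):

     aarch64_record.aarch64_insn = 0xf9000020;
     ret = aarch64_record_decode_insn_handler (&aarch64_record);
     SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
     SELF_CHECK (aarch64_record.mem_rec_count == 1);

   It is left as a comment because recording a store reads the base
   register through the regcache, which is NULL in this test.  */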
4021 deallocate_reg_mem (&aarch64_record);
4022 }
4023
4024 } // namespace selftests
4025 #endif /* GDB_SELF_TEST */
4026
4027 /* Parse the current instruction and record the values of the registers and
4028 memory that will be changed by it to record_arch_list.  Return -1 if
4029 something goes wrong.  */
4030
4031 int
4032 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4033 CORE_ADDR insn_addr)
4034 {
4035 uint32_t rec_no = 0;
4036 uint8_t insn_size = 4;
4037 uint32_t ret = 0;
4038 gdb_byte buf[insn_size];
4039 insn_decode_record aarch64_record;
4040
4041 memset (&buf[0], 0, insn_size);
4042 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4043 target_read_memory (insn_addr, &buf[0], insn_size);
4044 aarch64_record.aarch64_insn
4045 = (uint32_t) extract_unsigned_integer (&buf[0],
4046 insn_size,
4047 gdbarch_byte_order (gdbarch));
4048 aarch64_record.regcache = regcache;
4049 aarch64_record.this_addr = insn_addr;
4050 aarch64_record.gdbarch = gdbarch;
4051
4052 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4053 if (ret == AARCH64_RECORD_UNSUPPORTED)
4054 {
4055 printf_unfiltered (_("Process record does not support instruction "
4056 "0x%0x at address %s.\n"),
4057 aarch64_record.aarch64_insn,
4058 paddress (gdbarch, insn_addr));
4059 ret = -1;
4060 }
4061
4062 if (0 == ret)
4063 {
4064 /* Record registers. */
4065 record_full_arch_list_add_reg (aarch64_record.regcache,
4066 AARCH64_PC_REGNUM);
4067 /* Always record register CPSR. */
4068 record_full_arch_list_add_reg (aarch64_record.regcache,
4069 AARCH64_CPSR_REGNUM);
4070 if (aarch64_record.aarch64_regs)
4071 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4072 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4073 aarch64_record.aarch64_regs[rec_no]))
4074 ret = -1;
4075
4076 /* Record memories. */
4077 if (aarch64_record.aarch64_mems)
4078 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4079 if (record_full_arch_list_add_mem
4080 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4081 aarch64_record.aarch64_mems[rec_no].len))
4082 ret = -1;
4083
4084 if (record_full_arch_list_add_end ())
4085 ret = -1;
4086 }
4087
4088 deallocate_reg_mem (&aarch64_record);
4089 return ret;
4090 }