[AArch64] Refactor aarch64_make_prologue_cache
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
32d0add0 3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
45
46#include "aarch64-tdep.h"
47
48#include "elf-bfd.h"
49#include "elf/aarch64.h"
50
07b287a0
MS
51#include "vec.h"
52
99afc88b
OJ
53#include "record.h"
54#include "record-full.h"
55
07b287a0 56#include "features/aarch64.c"
07b287a0
MS
57
58/* Pseudo register base numbers. */
59#define AARCH64_Q0_REGNUM 0
60#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
61#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
62#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
63#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
64
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string onto the raw register number it
   refers to.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  Each wN aliases the corresponding xN.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: ip0/ip1 alias x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
114
/* The required core 'R' registers.  Indexed by raw register number
   relative to AARCH64_X0_REGNUM.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
130
/* The FP/SIMD 'V' registers.  Indexed by raw register number relative
   to AARCH64_V0_REGNUM.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
147
/* AArch64 prologue cache structure.  Filled in by the prologue
   analysis and consumed by the frame unwinders below.  */
struct aarch64_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  Zero while the frame has not been
     identified (the cache is zero-allocated).  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or
     -1 when no frame could be identified.  */
  int framereg;

  /* Saved register offsets; converted to absolute addresses once
     PREV_SP is known (see aarch64_make_prologue_cache).  */
  struct trad_frame_saved_reg *saved_regs;
};
167
/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;

/* "show" callback reporting the current value of the AArch64 debug
   flag; VALUE is the printable form supplied by the command layer.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
177
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits);
   it must be between 1 and 32 inclusive.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Do the arithmetic in uint32_t and sign-extend explicitly.  The
     previous implementation left-shifted a signed 32-bit value, which
     is undefined behavior in C when bits are shifted into or past the
     sign bit.  */
  uint32_t field = (insn >> offset) & (0xffffffffu >> (32 - width));
  uint32_t signbit = 1u << (width - 1);

  /* (field ^ signbit) - signbit maps an unsigned WIDTH-bit value onto
     its two's-complement signed interpretation.  */
  return (int32_t) ((field ^ signbit) - signbit);
}
196
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK selects the bits within the opcode that are tested against
   PATTERN for a match.

   Return non-zero on a match, zero otherwise.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected = insn & mask;

  return selected == pattern;
}
210
211/* Decode an opcode if it represents an immediate ADD or SUB instruction.
212
213 ADDR specifies the address of the opcode.
214 INSN specifies the opcode to test.
215 RD receives the 'rd' field from the decoded instruction.
216 RN receives the 'rn' field from the decoded instruction.
217
218 Return 1 if the opcodes matches and is decoded, otherwise 0. */
219static int
220decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
221 int32_t *imm)
222{
223 if ((insn & 0x9f000000) == 0x91000000)
224 {
225 unsigned shift;
226 unsigned op_is_sub;
227
228 *rd = (insn >> 0) & 0x1f;
229 *rn = (insn >> 5) & 0x1f;
230 *imm = (insn >> 10) & 0xfff;
231 shift = (insn >> 22) & 0x3;
232 op_is_sub = (insn >> 30) & 0x1;
233
234 switch (shift)
235 {
236 case 0:
237 break;
238 case 1:
239 *imm <<= 12;
240 break;
241 default:
242 /* UNDEFINED */
243 return 0;
244 }
245
246 if (op_is_sub)
247 *imm = -*imm;
248
249 if (aarch64_debug)
250 fprintf_unfiltered (gdb_stdlog,
251 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
252 core_addr_to_string_nz (addr), insn, *rd, *rn,
253 *imm);
254 return 1;
255 }
256 return 0;
257}
258
259/* Decode an opcode if it represents an ADRP instruction.
260
261 ADDR specifies the address of the opcode.
262 INSN specifies the opcode to test.
263 RD receives the 'rd' field from the decoded instruction.
264
265 Return 1 if the opcodes matches and is decoded, otherwise 0. */
266
267static int
268decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
269{
270 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
271 {
272 *rd = (insn >> 0) & 0x1f;
273
274 if (aarch64_debug)
275 fprintf_unfiltered (gdb_stdlog,
276 "decode: 0x%s 0x%x adrp x%u, #?\n",
277 core_addr_to_string_nz (addr), insn, *rd);
278 return 1;
279 }
280 return 0;
281}
282
283/* Decode an opcode if it represents an branch immediate or branch
284 and link immediate instruction.
285
286 ADDR specifies the address of the opcode.
287 INSN specifies the opcode to test.
288 LINK receives the 'link' bit from the decoded instruction.
289 OFFSET receives the immediate offset from the decoded instruction.
290
291 Return 1 if the opcodes matches and is decoded, otherwise 0. */
292
293static int
294decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
295{
296 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
297 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
298 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
299 {
300 *link = insn >> 31;
301 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
302
303 if (aarch64_debug)
304 fprintf_unfiltered (gdb_stdlog,
305 "decode: 0x%s 0x%x %s 0x%s\n",
306 core_addr_to_string_nz (addr), insn,
307 *link ? "bl" : "b",
308 core_addr_to_string_nz (addr + *offset));
309
310 return 1;
311 }
312 return 0;
313}
314
315/* Decode an opcode if it represents a conditional branch instruction.
316
317 ADDR specifies the address of the opcode.
318 INSN specifies the opcode to test.
319 COND receives the branch condition field from the decoded
320 instruction.
321 OFFSET receives the immediate offset from the decoded instruction.
322
323 Return 1 if the opcodes matches and is decoded, otherwise 0. */
324
325static int
326decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
327{
328 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
329 {
330 *cond = (insn >> 0) & 0xf;
331 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
332
333 if (aarch64_debug)
334 fprintf_unfiltered (gdb_stdlog,
335 "decode: 0x%s 0x%x b<%u> 0x%s\n",
336 core_addr_to_string_nz (addr), insn, *cond,
337 core_addr_to_string_nz (addr + *offset));
338 return 1;
339 }
340 return 0;
341}
342
343/* Decode an opcode if it represents a branch via register instruction.
344
345 ADDR specifies the address of the opcode.
346 INSN specifies the opcode to test.
347 LINK receives the 'link' bit from the decoded instruction.
348 RN receives the 'rn' field from the decoded instruction.
349
350 Return 1 if the opcodes matches and is decoded, otherwise 0. */
351
352static int
353decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
354{
355 /* 8 4 0 6 2 8 4 0 */
356 /* blr 110101100011111100000000000rrrrr */
357 /* br 110101100001111100000000000rrrrr */
358 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
359 {
360 *link = (insn >> 21) & 1;
361 *rn = (insn >> 5) & 0x1f;
362
363 if (aarch64_debug)
364 fprintf_unfiltered (gdb_stdlog,
365 "decode: 0x%s 0x%x %s 0x%x\n",
366 core_addr_to_string_nz (addr), insn,
367 *link ? "blr" : "br", *rn);
368
369 return 1;
370 }
371 return 0;
372}
373
374/* Decode an opcode if it represents a CBZ or CBNZ instruction.
375
376 ADDR specifies the address of the opcode.
377 INSN specifies the opcode to test.
378 IS64 receives the 'sf' field from the decoded instruction.
379 OP receives the 'op' field from the decoded instruction.
380 RN receives the 'rn' field from the decoded instruction.
381 OFFSET receives the 'imm19' field from the decoded instruction.
382
383 Return 1 if the opcodes matches and is decoded, otherwise 0. */
384
385static int
386decode_cb (CORE_ADDR addr,
387 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
388 int32_t *offset)
389{
390 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
391 {
392 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
393 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
394
395 *rn = (insn >> 0) & 0x1f;
396 *is64 = (insn >> 31) & 0x1;
397 *op = (insn >> 24) & 0x1;
398 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
399
400 if (aarch64_debug)
401 fprintf_unfiltered (gdb_stdlog,
402 "decode: 0x%s 0x%x %s 0x%s\n",
403 core_addr_to_string_nz (addr), insn,
404 *op ? "cbnz" : "cbz",
405 core_addr_to_string_nz (addr + *offset));
406 return 1;
407 }
408 return 0;
409}
410
411/* Decode an opcode if it represents a ERET instruction.
412
413 ADDR specifies the address of the opcode.
414 INSN specifies the opcode to test.
415
416 Return 1 if the opcodes matches and is decoded, otherwise 0. */
417
418static int
419decode_eret (CORE_ADDR addr, uint32_t insn)
420{
421 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
422 if (insn == 0xd69f03e0)
423 {
424 if (aarch64_debug)
425 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
426 core_addr_to_string_nz (addr), insn);
427 return 1;
428 }
429 return 0;
430}
431
432/* Decode an opcode if it represents a MOVZ instruction.
433
434 ADDR specifies the address of the opcode.
435 INSN specifies the opcode to test.
436 RD receives the 'rd' field from the decoded instruction.
437
438 Return 1 if the opcodes matches and is decoded, otherwise 0. */
439
440static int
441decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
442{
443 if (decode_masked_match (insn, 0xff800000, 0x52800000))
444 {
445 *rd = (insn >> 0) & 0x1f;
446
447 if (aarch64_debug)
448 fprintf_unfiltered (gdb_stdlog,
449 "decode: 0x%s 0x%x movz x%u, #?\n",
450 core_addr_to_string_nz (addr), insn, *rd);
451 return 1;
452 }
453 return 0;
454}
455
456/* Decode an opcode if it represents a ORR (shifted register)
457 instruction.
458
459 ADDR specifies the address of the opcode.
460 INSN specifies the opcode to test.
461 RD receives the 'rd' field from the decoded instruction.
462 RN receives the 'rn' field from the decoded instruction.
463 RM receives the 'rm' field from the decoded instruction.
464 IMM receives the 'imm6' field from the decoded instruction.
465
466 Return 1 if the opcodes matches and is decoded, otherwise 0. */
467
468static int
469decode_orr_shifted_register_x (CORE_ADDR addr,
470 uint32_t insn, unsigned *rd, unsigned *rn,
471 unsigned *rm, int32_t *imm)
472{
473 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
474 {
475 *rd = (insn >> 0) & 0x1f;
476 *rn = (insn >> 5) & 0x1f;
477 *rm = (insn >> 16) & 0x1f;
478 *imm = (insn >> 10) & 0x3f;
479
480 if (aarch64_debug)
481 fprintf_unfiltered (gdb_stdlog,
482 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
483 core_addr_to_string_nz (addr), insn, *rd,
484 *rn, *rm, *imm);
485 return 1;
486 }
487 return 0;
488}
489
490/* Decode an opcode if it represents a RET instruction.
491
492 ADDR specifies the address of the opcode.
493 INSN specifies the opcode to test.
494 RN receives the 'rn' field from the decoded instruction.
495
496 Return 1 if the opcodes matches and is decoded, otherwise 0. */
497
498static int
499decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
500{
501 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
502 {
503 *rn = (insn >> 5) & 0x1f;
504 if (aarch64_debug)
505 fprintf_unfiltered (gdb_stdlog,
506 "decode: 0x%s 0x%x ret x%u\n",
507 core_addr_to_string_nz (addr), insn, *rn);
508 return 1;
509 }
510 return 0;
511}
512
513/* Decode an opcode if it represents the following instruction:
514 STP rt, rt2, [rn, #imm]
515
516 ADDR specifies the address of the opcode.
517 INSN specifies the opcode to test.
518 RT1 receives the 'rt' field from the decoded instruction.
519 RT2 receives the 'rt2' field from the decoded instruction.
520 RN receives the 'rn' field from the decoded instruction.
521 IMM receives the 'imm' field from the decoded instruction.
522
523 Return 1 if the opcodes matches and is decoded, otherwise 0. */
524
525static int
526decode_stp_offset (CORE_ADDR addr,
527 uint32_t insn,
528 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
529{
530 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
531 {
532 *rt1 = (insn >> 0) & 0x1f;
533 *rn = (insn >> 5) & 0x1f;
534 *rt2 = (insn >> 10) & 0x1f;
535 *imm = extract_signed_bitfield (insn, 7, 15);
536 *imm <<= 3;
537
538 if (aarch64_debug)
539 fprintf_unfiltered (gdb_stdlog,
540 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
541 core_addr_to_string_nz (addr), insn,
542 *rt1, *rt2, *rn, *imm);
543 return 1;
544 }
545 return 0;
546}
547
548/* Decode an opcode if it represents the following instruction:
549 STP rt, rt2, [rn, #imm]!
550
551 ADDR specifies the address of the opcode.
552 INSN specifies the opcode to test.
553 RT1 receives the 'rt' field from the decoded instruction.
554 RT2 receives the 'rt2' field from the decoded instruction.
555 RN receives the 'rn' field from the decoded instruction.
556 IMM receives the 'imm' field from the decoded instruction.
557
558 Return 1 if the opcodes matches and is decoded, otherwise 0. */
559
560static int
561decode_stp_offset_wb (CORE_ADDR addr,
562 uint32_t insn,
563 unsigned *rt1, unsigned *rt2, unsigned *rn,
564 int32_t *imm)
565{
566 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
567 {
568 *rt1 = (insn >> 0) & 0x1f;
569 *rn = (insn >> 5) & 0x1f;
570 *rt2 = (insn >> 10) & 0x1f;
571 *imm = extract_signed_bitfield (insn, 7, 15);
572 *imm <<= 3;
573
574 if (aarch64_debug)
575 fprintf_unfiltered (gdb_stdlog,
576 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
577 core_addr_to_string_nz (addr), insn,
578 *rt1, *rt2, *rn, *imm);
579 return 1;
580 }
581 return 0;
582}
583
584/* Decode an opcode if it represents the following instruction:
585 STUR rt, [rn, #imm]
586
587 ADDR specifies the address of the opcode.
588 INSN specifies the opcode to test.
589 IS64 receives size field from the decoded instruction.
590 RT receives the 'rt' field from the decoded instruction.
591 RN receives the 'rn' field from the decoded instruction.
592 IMM receives the 'imm' field from the decoded instruction.
593
594 Return 1 if the opcodes matches and is decoded, otherwise 0. */
595
596static int
597decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
598 unsigned *rn, int32_t *imm)
599{
600 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
601 {
602 *is64 = (insn >> 30) & 1;
603 *rt = (insn >> 0) & 0x1f;
604 *rn = (insn >> 5) & 0x1f;
605 *imm = extract_signed_bitfield (insn, 9, 12);
606
607 if (aarch64_debug)
608 fprintf_unfiltered (gdb_stdlog,
609 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
610 core_addr_to_string_nz (addr), insn,
611 *is64 ? 'x' : 'w', *rt, *rn, *imm);
612 return 1;
613 }
614 return 0;
615}
616
617/* Decode an opcode if it represents a TB or TBNZ instruction.
618
619 ADDR specifies the address of the opcode.
620 INSN specifies the opcode to test.
621 OP receives the 'op' field from the decoded instruction.
622 BIT receives the bit position field from the decoded instruction.
623 RT receives 'rt' field from the decoded instruction.
624 IMM receives 'imm' field from the decoded instruction.
625
626 Return 1 if the opcodes matches and is decoded, otherwise 0. */
627
628static int
629decode_tb (CORE_ADDR addr,
630 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
631 int32_t *imm)
632{
633 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
634 {
635 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
636 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
637
638 *rt = (insn >> 0) & 0x1f;
639 *op = insn & (1 << 24);
640 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
641 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
642
643 if (aarch64_debug)
644 fprintf_unfiltered (gdb_stdlog,
645 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
646 core_addr_to_string_nz (addr), insn,
647 *op ? "tbnz" : "tbz", *rt, *bit,
648 core_addr_to_string_nz (addr + *imm));
649 return 1;
650 }
651 return 0;
652}
653
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   GDBARCH is the architecture; START and LIMIT bound the instruction
   range to scan.  CACHE, if non-NULL, receives the identified frame
   register, frame size, and saved-register offsets.

   Return the address of the first instruction not recognized as part
   of the prologue.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Symbolic value of each X register, so stores of them to the stack
     can be tracked by the prologue-value machinery.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  /* Walk forward one 4-byte instruction at a time.  */
  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int32_t imm;
      unsigned cond;
      int is64;
      unsigned is_link;
      unsigned op;
      unsigned bit;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
	regs[rd] = pv_add_constant (regs[rn], imm);
      else if (decode_adrp (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else if (decode_b (start, insn, &is_link, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_bcond (start, insn, &cond, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_br (start, insn, &is_link, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_eret (start, insn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_movz (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else
	if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
	{
	  /* Only the register-move form (ORR xd, xzr, xm with no
	     shift) is tracked; any other ORR ends the analysis.  */
	  if (imm == 0 && rn == 31)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		fprintf_unfiltered
		  (gdb_stdlog,
		   "aarch64: prologue analysis gave up addr=0x%s "
		   "opcode=0x%x (orr x register)\n",
		   core_addr_to_string_nz (start),
		   insn);
	      break;
	    }
	}
      else if (decode_ret (start, insn, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
	{
	  pv_area_store (stack, pv_add_constant (regs[rn], offset),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	}
      else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	  /* The writeback form also updates the base register.  */
	  regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"aarch64: prologue analysis gave up addr=0x%s"
				" opcode=0x%x\n",
				core_addr_to_string_nz (start), insn);
	  break;
	}
    }

  /* With no cache to fill, just report how far we scanned.  */
  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record the stack offset at which each register was saved.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
841
842/* Implement the "skip_prologue" gdbarch method. */
843
844static CORE_ADDR
845aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
846{
847 unsigned long inst;
848 CORE_ADDR skip_pc;
849 CORE_ADDR func_addr, limit_pc;
850 struct symtab_and_line sal;
851
852 /* See if we can determine the end of the prologue via the symbol
853 table. If so, then return either PC, or the PC after the
854 prologue, whichever is greater. */
855 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
856 {
857 CORE_ADDR post_prologue_pc
858 = skip_prologue_using_sal (gdbarch, func_addr);
859
860 if (post_prologue_pc != 0)
861 return max (pc, post_prologue_pc);
862 }
863
864 /* Can't determine prologue from the symbol table, need to examine
865 instructions. */
866
867 /* Find an upper limit on the function prologue using the debug
868 information. If the debug information could not be used to
869 provide that bound, then use an arbitrary large number as the
870 upper bound. */
871 limit_pc = skip_prologue_using_sal (gdbarch, pc);
872 if (limit_pc == 0)
873 limit_pc = pc + 128; /* Magic. */
874
875 /* Try disassembling prologue. */
876 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
877}
878
879/* Scan the function prologue for THIS_FRAME and populate the prologue
880 cache CACHE. */
881
882static void
883aarch64_scan_prologue (struct frame_info *this_frame,
884 struct aarch64_prologue_cache *cache)
885{
886 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
887 CORE_ADDR prologue_start;
888 CORE_ADDR prologue_end;
889 CORE_ADDR prev_pc = get_frame_pc (this_frame);
890 struct gdbarch *gdbarch = get_frame_arch (this_frame);
891
892 /* Assume we do not find a frame. */
893 cache->framereg = -1;
894 cache->framesize = 0;
895
896 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
897 &prologue_end))
898 {
899 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
900
901 if (sal.line == 0)
902 {
903 /* No line info so use the current PC. */
904 prologue_end = prev_pc;
905 }
906 else if (sal.end < prologue_end)
907 {
908 /* The next line begins after the function end. */
909 prologue_end = sal.end;
910 }
911
912 prologue_end = min (prologue_end, prev_pc);
913 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
914 }
915 else
916 {
917 CORE_ADDR frame_loc;
918 LONGEST saved_fp;
919 LONGEST saved_lr;
920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
921
922 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
923 if (frame_loc == 0)
924 return;
925
926 cache->framereg = AARCH64_FP_REGNUM;
927 cache->framesize = 16;
928 cache->saved_regs[29].addr = 0;
929 cache->saved_regs[30].addr = 8;
930 }
931}
932
7c8edfae
PL
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;
  CORE_ADDR unwound_fp;
  int reg;

  /* Reuse the cache if it was already built for this frame.  */
  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  /* Install the cache immediately so the early returns below still
     leave the (possibly partial) cache in *THIS_CACHE.  */
  *this_cache = cache;

  aarch64_scan_prologue (this_frame, cache);

  /* The scan did not identify a frame register; prev_sp stays 0 (the
     cache is zero-allocated), which callers treat as "unknown".  */
  if (cache->framereg == -1)
    return cache;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return cache;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  return cache;
}
971
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  struct frame_id id;
  CORE_ADDR pc, func;

  /* This is meant to halt the backtrace at "_start".  Returning
     without setting *THIS_ID leaves the caller-supplied default in
     place, which stops further unwinding.  */
  pc = get_frame_pc (this_frame);
  if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return;

  /* If we've hit a wall, stop.  prev_sp is zero when the prologue
     scan failed to identify the frame.  */
  if (cache->prev_sp == 0)
    return;

  func = get_frame_func (this_frame);
  id = frame_id_build (cache->prev_sp, func);
  *this_id = id;
}
997
998/* Implement the "prev_register" frame_unwind method. */
999
1000static struct value *
1001aarch64_prologue_prev_register (struct frame_info *this_frame,
1002 void **this_cache, int prev_regnum)
1003{
1004 struct gdbarch *gdbarch = get_frame_arch (this_frame);
7c8edfae
PL
1005 struct aarch64_prologue_cache *cache
1006 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1007
1008 /* If we are asked to unwind the PC, then we need to return the LR
1009 instead. The prologue may save PC, but it will point into this
1010 frame's prologue, not the next frame's resume location. */
1011 if (prev_regnum == AARCH64_PC_REGNUM)
1012 {
1013 CORE_ADDR lr;
1014
1015 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1016 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1017 }
1018
1019 /* SP is generally not saved to the stack, but this frame is
1020 identified by the next frame's stack pointer at the time of the
1021 call. The value was already reconstructed into PREV_SP. */
1022 /*
1023 +----------+ ^
1024 | saved lr | |
1025 +->| saved fp |--+
1026 | | |
1027 | | | <- Previous SP
1028 | +----------+
1029 | | saved lr |
1030 +--| saved fp |<- FP
1031 | |
1032 | |<- SP
1033 +----------+ */
1034 if (prev_regnum == AARCH64_SP_REGNUM)
1035 return frame_unwind_got_constant (this_frame, prev_regnum,
1036 cache->prev_sp);
1037
1038 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1039 prev_regnum);
1040}
1041
/* AArch64 prologue unwinder.  Entries correspond to the frame_unwind
   callbacks: frame type, stop-reason, this_id, prev_register, unwind
   data, and sniffer.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1052
1053/* Allocate an aarch64_prologue_cache and fill it with information
1054 about the prologue of *THIS_FRAME. */
1055
1056static struct aarch64_prologue_cache *
1057aarch64_make_stub_cache (struct frame_info *this_frame)
1058{
1059 int reg;
1060 struct aarch64_prologue_cache *cache;
1061 CORE_ADDR unwound_fp;
1062
1063 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1064 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1065
1066 cache->prev_sp
1067 = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1068
1069 return cache;
1070}
1071
1072/* Our frame ID for a stub frame is the current SP and LR. */
1073
1074static void
1075aarch64_stub_this_id (struct frame_info *this_frame,
1076 void **this_cache, struct frame_id *this_id)
1077{
1078 struct aarch64_prologue_cache *cache;
1079
1080 if (*this_cache == NULL)
1081 *this_cache = aarch64_make_stub_cache (this_frame);
1082 cache = *this_cache;
1083
1084 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1085}
1086
1087/* Implement the "sniffer" frame_unwind method. */
1088
1089static int
1090aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1091 struct frame_info *this_frame,
1092 void **this_prologue_cache)
1093{
1094 CORE_ADDR addr_in_block;
1095 gdb_byte dummy[4];
1096
1097 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1098 if (in_plt_section (addr_in_block)
07b287a0
MS
1099 /* We also use the stub winder if the target memory is unreadable
1100 to avoid having the prologue unwinder trying to read it. */
1101 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1102 return 1;
1103
1104 return 0;
1105}
1106
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,				/* type */
  default_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_stub_this_id,			/* this_id */
  aarch64_prologue_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  aarch64_stub_unwind_sniffer		/* sniffer */
};
1117
1118/* Return the frame base address of *THIS_FRAME. */
1119
1120static CORE_ADDR
1121aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1122{
7c8edfae
PL
1123 struct aarch64_prologue_cache *cache
1124 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1125
1126 return cache->prev_sp - cache->framesize;
1127}
1128
/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,	/* unwind */
  aarch64_normal_frame_base,	/* this_base */
  aarch64_normal_frame_base,	/* this_args */
  aarch64_normal_frame_base	/* this_locals */
};
1137
1138/* Assuming THIS_FRAME is a dummy, return the frame ID of that
1139 dummy frame. The frame ID's base needs to match the TOS value
1140 saved by save_dummy_frame_tos () and returned from
1141 aarch64_push_dummy_call, and the PC needs to match the dummy
1142 frame's breakpoint. */
1143
1144static struct frame_id
1145aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1146{
1147 return frame_id_build (get_frame_register_unsigned (this_frame,
1148 AARCH64_SP_REGNUM),
1149 get_frame_pc (this_frame));
1150}
1151
1152/* Implement the "unwind_pc" gdbarch method. */
1153
1154static CORE_ADDR
1155aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1156{
1157 CORE_ADDR pc
1158 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1159
1160 return pc;
1161}
1162
1163/* Implement the "unwind_sp" gdbarch method. */
1164
1165static CORE_ADDR
1166aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1167{
1168 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1169}
1170
1171/* Return the value of the REGNUM register in the previous frame of
1172 *THIS_FRAME. */
1173
1174static struct value *
1175aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1176 void **this_cache, int regnum)
1177{
1178 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1179 CORE_ADDR lr;
1180
1181 switch (regnum)
1182 {
1183 case AARCH64_PC_REGNUM:
1184 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1185 return frame_unwind_got_constant (this_frame, regnum, lr);
1186
1187 default:
1188 internal_error (__FILE__, __LINE__,
1189 _("Unexpected register %d"), regnum);
1190 }
1191}
1192
1193/* Implement the "init_reg" dwarf2_frame_ops method. */
1194
1195static void
1196aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1197 struct dwarf2_frame_state_reg *reg,
1198 struct frame_info *this_frame)
1199{
1200 switch (regnum)
1201 {
1202 case AARCH64_PC_REGNUM:
1203 reg->how = DWARF2_FRAME_REG_FN;
1204 reg->loc.fn = aarch64_dwarf2_prev_register;
1205 break;
1206 case AARCH64_SP_REGNUM:
1207 reg->how = DWARF2_FRAME_REG_CFA;
1208 break;
1209 }
1210}
1211
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

/* Define the vector operations for stack_item_t.  */
DEF_VEC_O (stack_item_t);
1225
1226/* Return the alignment (in bytes) of the given type. */
1227
1228static int
1229aarch64_type_align (struct type *t)
1230{
1231 int n;
1232 int align;
1233 int falign;
1234
1235 t = check_typedef (t);
1236 switch (TYPE_CODE (t))
1237 {
1238 default:
1239 /* Should never happen. */
1240 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1241 return 4;
1242
1243 case TYPE_CODE_PTR:
1244 case TYPE_CODE_ENUM:
1245 case TYPE_CODE_INT:
1246 case TYPE_CODE_FLT:
1247 case TYPE_CODE_SET:
1248 case TYPE_CODE_RANGE:
1249 case TYPE_CODE_BITSTRING:
1250 case TYPE_CODE_REF:
1251 case TYPE_CODE_CHAR:
1252 case TYPE_CODE_BOOL:
1253 return TYPE_LENGTH (t);
1254
1255 case TYPE_CODE_ARRAY:
1256 case TYPE_CODE_COMPLEX:
1257 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1258
1259 case TYPE_CODE_STRUCT:
1260 case TYPE_CODE_UNION:
1261 align = 1;
1262 for (n = 0; n < TYPE_NFIELDS (t); n++)
1263 {
1264 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1265 if (falign > align)
1266 align = falign;
1267 }
1268 return align;
1269 }
1270}
1271
1272/* Return 1 if *TY is a homogeneous floating-point aggregate as
1273 defined in the AAPCS64 ABI document; otherwise return 0. */
1274
1275static int
1276is_hfa (struct type *ty)
1277{
1278 switch (TYPE_CODE (ty))
1279 {
1280 case TYPE_CODE_ARRAY:
1281 {
1282 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1283 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1284 return 1;
1285 break;
1286 }
1287
1288 case TYPE_CODE_UNION:
1289 case TYPE_CODE_STRUCT:
1290 {
1291 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1292 {
1293 struct type *member0_type;
1294
1295 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1296 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1297 {
1298 int i;
1299
1300 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1301 {
1302 struct type *member1_type;
1303
1304 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1305 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1306 || (TYPE_LENGTH (member0_type)
1307 != TYPE_LENGTH (member1_type)))
1308 return 0;
1309 }
1310 return 1;
1311 }
1312 }
1313 return 0;
1314 }
1315
1316 default:
1317 break;
1318 }
1319
1320 return 0;
1321}
1322
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};
1344
1345/* Pass a value in a sequence of consecutive X registers. The caller
1346 is responsbile for ensuring sufficient registers are available. */
1347
1348static void
1349pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1350 struct aarch64_call_info *info, struct type *type,
1351 const bfd_byte *buf)
1352{
1353 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1354 int len = TYPE_LENGTH (type);
1355 enum type_code typecode = TYPE_CODE (type);
1356 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1357
1358 info->argnum++;
1359
1360 while (len > 0)
1361 {
1362 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1363 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1364 byte_order);
1365
1366
1367 /* Adjust sub-word struct/union args when big-endian. */
1368 if (byte_order == BFD_ENDIAN_BIG
1369 && partial_len < X_REGISTER_SIZE
1370 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1371 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1372
1373 if (aarch64_debug)
1374 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1375 info->argnum,
1376 gdbarch_register_name (gdbarch, regnum),
1377 phex (regval, X_REGISTER_SIZE));
1378 regcache_cooked_write_unsigned (regcache, regnum, regval);
1379 len -= partial_len;
1380 buf += partial_len;
1381 regnum++;
1382 }
1383}
1384
1385/* Attempt to marshall a value in a V register. Return 1 if
1386 successful, or 0 if insufficient registers are available. This
1387 function, unlike the equivalent pass_in_x() function does not
1388 handle arguments spread across multiple registers. */
1389
1390static int
1391pass_in_v (struct gdbarch *gdbarch,
1392 struct regcache *regcache,
1393 struct aarch64_call_info *info,
1394 const bfd_byte *buf)
1395{
1396 if (info->nsrn < 8)
1397 {
1398 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1399 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1400
1401 info->argnum++;
1402 info->nsrn++;
1403
1404 regcache_cooked_write (regcache, regnum, buf);
1405 if (aarch64_debug)
1406 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1407 info->argnum,
1408 gdbarch_register_name (gdbarch, regnum));
1409 return 1;
1410 }
1411 info->nsrn = 8;
1412 return 0;
1413}
1414
1415/* Marshall an argument onto the stack. */
1416
1417static void
1418pass_on_stack (struct aarch64_call_info *info, struct type *type,
1419 const bfd_byte *buf)
1420{
1421 int len = TYPE_LENGTH (type);
1422 int align;
1423 stack_item_t item;
1424
1425 info->argnum++;
1426
1427 align = aarch64_type_align (type);
1428
1429 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1430 Natural alignment of the argument's type. */
1431 align = align_up (align, 8);
1432
1433 /* The AArch64 PCS requires at most doubleword alignment. */
1434 if (align > 16)
1435 align = 16;
1436
1437 if (aarch64_debug)
1438 fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
1439 info->argnum, len, info->nsaa);
1440
1441 item.len = len;
1442 item.data = buf;
1443 VEC_safe_push (stack_item_t, info->si, &item);
1444
1445 info->nsaa += len;
1446 if (info->nsaa & (align - 1))
1447 {
1448 /* Push stack alignment padding. */
1449 int pad = align - (info->nsaa & (align - 1));
1450
1451 item.len = pad;
1452 item.data = buf;
1453
1454 VEC_safe_push (stack_item_t, info->si, &item);
1455 info->nsaa += pad;
1456 }
1457}
1458
1459/* Marshall an argument into a sequence of one or more consecutive X
1460 registers or, if insufficient X registers are available then onto
1461 the stack. */
1462
1463static void
1464pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1465 struct aarch64_call_info *info, struct type *type,
1466 const bfd_byte *buf)
1467{
1468 int len = TYPE_LENGTH (type);
1469 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1470
1471 /* PCS C.13 - Pass in registers if we have enough spare */
1472 if (info->ngrn + nregs <= 8)
1473 {
1474 pass_in_x (gdbarch, regcache, info, type, buf);
1475 info->ngrn += nregs;
1476 }
1477 else
1478 {
1479 info->ngrn = 8;
1480 pass_on_stack (info, type, buf);
1481 }
1482}
1483
1484/* Pass a value in a V register, or on the stack if insufficient are
1485 available. */
1486
1487static void
1488pass_in_v_or_stack (struct gdbarch *gdbarch,
1489 struct regcache *regcache,
1490 struct aarch64_call_info *info,
1491 struct type *type,
1492 const bfd_byte *buf)
1493{
1494 if (!pass_in_v (gdbarch, regcache, info, buf))
1495 pass_on_stack (info, type, buf);
1496}
1497
1498/* Implement the "push_dummy_call" gdbarch method. */
1499
1500static CORE_ADDR
1501aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1502 struct regcache *regcache, CORE_ADDR bp_addr,
1503 int nargs,
1504 struct value **args, CORE_ADDR sp, int struct_return,
1505 CORE_ADDR struct_addr)
1506{
1507 int nstack = 0;
1508 int argnum;
1509 int x_argreg;
1510 int v_argreg;
1511 struct aarch64_call_info info;
1512 struct type *func_type;
1513 struct type *return_type;
1514 int lang_struct_return;
1515
1516 memset (&info, 0, sizeof (info));
1517
1518 /* We need to know what the type of the called function is in order
1519 to determine the number of named/anonymous arguments for the
1520 actual argument placement, and the return type in order to handle
1521 return value correctly.
1522
1523 The generic code above us views the decision of return in memory
1524 or return in registers as a two stage processes. The language
1525 handler is consulted first and may decide to return in memory (eg
1526 class with copy constructor returned by value), this will cause
1527 the generic code to allocate space AND insert an initial leading
1528 argument.
1529
1530 If the language code does not decide to pass in memory then the
1531 target code is consulted.
1532
1533 If the language code decides to pass in memory we want to move
1534 the pointer inserted as the initial argument from the argument
1535 list and into X8, the conventional AArch64 struct return pointer
1536 register.
1537
1538 This is slightly awkward, ideally the flag "lang_struct_return"
1539 would be passed to the targets implementation of push_dummy_call.
1540 Rather that change the target interface we call the language code
1541 directly ourselves. */
1542
1543 func_type = check_typedef (value_type (function));
1544
1545 /* Dereference function pointer types. */
1546 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1547 func_type = TYPE_TARGET_TYPE (func_type);
1548
1549 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1550 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1551
1552 /* If language_pass_by_reference () returned true we will have been
1553 given an additional initial argument, a hidden pointer to the
1554 return slot in memory. */
1555 return_type = TYPE_TARGET_TYPE (func_type);
1556 lang_struct_return = language_pass_by_reference (return_type);
1557
1558 /* Set the return address. For the AArch64, the return breakpoint
1559 is always at BP_ADDR. */
1560 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1561
1562 /* If we were given an initial argument for the return slot because
1563 lang_struct_return was true, lose it. */
1564 if (lang_struct_return)
1565 {
1566 args++;
1567 nargs--;
1568 }
1569
1570 /* The struct_return pointer occupies X8. */
1571 if (struct_return || lang_struct_return)
1572 {
1573 if (aarch64_debug)
1574 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1575 gdbarch_register_name
1576 (gdbarch,
1577 AARCH64_STRUCT_RETURN_REGNUM),
1578 paddress (gdbarch, struct_addr));
1579 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1580 struct_addr);
1581 }
1582
1583 for (argnum = 0; argnum < nargs; argnum++)
1584 {
1585 struct value *arg = args[argnum];
1586 struct type *arg_type;
1587 int len;
1588
1589 arg_type = check_typedef (value_type (arg));
1590 len = TYPE_LENGTH (arg_type);
1591
1592 switch (TYPE_CODE (arg_type))
1593 {
1594 case TYPE_CODE_INT:
1595 case TYPE_CODE_BOOL:
1596 case TYPE_CODE_CHAR:
1597 case TYPE_CODE_RANGE:
1598 case TYPE_CODE_ENUM:
1599 if (len < 4)
1600 {
1601 /* Promote to 32 bit integer. */
1602 if (TYPE_UNSIGNED (arg_type))
1603 arg_type = builtin_type (gdbarch)->builtin_uint32;
1604 else
1605 arg_type = builtin_type (gdbarch)->builtin_int32;
1606 arg = value_cast (arg_type, arg);
1607 }
1608 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1609 value_contents (arg));
1610 break;
1611
1612 case TYPE_CODE_COMPLEX:
1613 if (info.nsrn <= 6)
1614 {
1615 const bfd_byte *buf = value_contents (arg);
1616 struct type *target_type =
1617 check_typedef (TYPE_TARGET_TYPE (arg_type));
1618
1619 pass_in_v (gdbarch, regcache, &info, buf);
1620 pass_in_v (gdbarch, regcache, &info,
1621 buf + TYPE_LENGTH (target_type));
1622 }
1623 else
1624 {
1625 info.nsrn = 8;
1626 pass_on_stack (&info, arg_type, value_contents (arg));
1627 }
1628 break;
1629 case TYPE_CODE_FLT:
1630 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1631 value_contents (arg));
1632 break;
1633
1634 case TYPE_CODE_STRUCT:
1635 case TYPE_CODE_ARRAY:
1636 case TYPE_CODE_UNION:
1637 if (is_hfa (arg_type))
1638 {
1639 int elements = TYPE_NFIELDS (arg_type);
1640
1641 /* Homogeneous Aggregates */
1642 if (info.nsrn + elements < 8)
1643 {
1644 int i;
1645
1646 for (i = 0; i < elements; i++)
1647 {
1648 /* We know that we have sufficient registers
1649 available therefore this will never fallback
1650 to the stack. */
1651 struct value *field =
1652 value_primitive_field (arg, 0, i, arg_type);
1653 struct type *field_type =
1654 check_typedef (value_type (field));
1655
1656 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1657 value_contents_writeable (field));
1658 }
1659 }
1660 else
1661 {
1662 info.nsrn = 8;
1663 pass_on_stack (&info, arg_type, value_contents (arg));
1664 }
1665 }
1666 else if (len > 16)
1667 {
1668 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1669 invisible reference. */
1670
1671 /* Allocate aligned storage. */
1672 sp = align_down (sp - len, 16);
1673
1674 /* Write the real data into the stack. */
1675 write_memory (sp, value_contents (arg), len);
1676
1677 /* Construct the indirection. */
1678 arg_type = lookup_pointer_type (arg_type);
1679 arg = value_from_pointer (arg_type, sp);
1680 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1681 value_contents (arg));
1682 }
1683 else
1684 /* PCS C.15 / C.18 multiple values pass. */
1685 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1686 value_contents (arg));
1687 break;
1688
1689 default:
1690 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1691 value_contents (arg));
1692 break;
1693 }
1694 }
1695
1696 /* Make sure stack retains 16 byte alignment. */
1697 if (info.nsaa & 15)
1698 sp -= 16 - (info.nsaa & 15);
1699
1700 while (!VEC_empty (stack_item_t, info.si))
1701 {
1702 stack_item_t *si = VEC_last (stack_item_t, info.si);
1703
1704 sp -= si->len;
1705 write_memory (sp, si->data, si->len);
1706 VEC_pop (stack_item_t, info.si);
1707 }
1708
1709 VEC_free (stack_item_t, info.si);
1710
1711 /* Finally, update the SP register. */
1712 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1713
1714 return sp;
1715}
1716
1717/* Implement the "frame_align" gdbarch method. */
1718
1719static CORE_ADDR
1720aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1721{
1722 /* Align the stack to sixteen bytes. */
1723 return sp & ~(CORE_ADDR) 15;
1724}
1725
1726/* Return the type for an AdvSISD Q register. */
1727
1728static struct type *
1729aarch64_vnq_type (struct gdbarch *gdbarch)
1730{
1731 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1732
1733 if (tdep->vnq_type == NULL)
1734 {
1735 struct type *t;
1736 struct type *elem;
1737
1738 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1739 TYPE_CODE_UNION);
1740
1741 elem = builtin_type (gdbarch)->builtin_uint128;
1742 append_composite_type_field (t, "u", elem);
1743
1744 elem = builtin_type (gdbarch)->builtin_int128;
1745 append_composite_type_field (t, "s", elem);
1746
1747 tdep->vnq_type = t;
1748 }
1749
1750 return tdep->vnq_type;
1751}
1752
1753/* Return the type for an AdvSISD D register. */
1754
1755static struct type *
1756aarch64_vnd_type (struct gdbarch *gdbarch)
1757{
1758 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1759
1760 if (tdep->vnd_type == NULL)
1761 {
1762 struct type *t;
1763 struct type *elem;
1764
1765 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1766 TYPE_CODE_UNION);
1767
1768 elem = builtin_type (gdbarch)->builtin_double;
1769 append_composite_type_field (t, "f", elem);
1770
1771 elem = builtin_type (gdbarch)->builtin_uint64;
1772 append_composite_type_field (t, "u", elem);
1773
1774 elem = builtin_type (gdbarch)->builtin_int64;
1775 append_composite_type_field (t, "s", elem);
1776
1777 tdep->vnd_type = t;
1778 }
1779
1780 return tdep->vnd_type;
1781}
1782
1783/* Return the type for an AdvSISD S register. */
1784
1785static struct type *
1786aarch64_vns_type (struct gdbarch *gdbarch)
1787{
1788 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1789
1790 if (tdep->vns_type == NULL)
1791 {
1792 struct type *t;
1793 struct type *elem;
1794
1795 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1796 TYPE_CODE_UNION);
1797
1798 elem = builtin_type (gdbarch)->builtin_float;
1799 append_composite_type_field (t, "f", elem);
1800
1801 elem = builtin_type (gdbarch)->builtin_uint32;
1802 append_composite_type_field (t, "u", elem);
1803
1804 elem = builtin_type (gdbarch)->builtin_int32;
1805 append_composite_type_field (t, "s", elem);
1806
1807 tdep->vns_type = t;
1808 }
1809
1810 return tdep->vns_type;
1811}
1812
1813/* Return the type for an AdvSISD H register. */
1814
1815static struct type *
1816aarch64_vnh_type (struct gdbarch *gdbarch)
1817{
1818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1819
1820 if (tdep->vnh_type == NULL)
1821 {
1822 struct type *t;
1823 struct type *elem;
1824
1825 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1826 TYPE_CODE_UNION);
1827
1828 elem = builtin_type (gdbarch)->builtin_uint16;
1829 append_composite_type_field (t, "u", elem);
1830
1831 elem = builtin_type (gdbarch)->builtin_int16;
1832 append_composite_type_field (t, "s", elem);
1833
1834 tdep->vnh_type = t;
1835 }
1836
1837 return tdep->vnh_type;
1838}
1839
1840/* Return the type for an AdvSISD B register. */
1841
1842static struct type *
1843aarch64_vnb_type (struct gdbarch *gdbarch)
1844{
1845 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1846
1847 if (tdep->vnb_type == NULL)
1848 {
1849 struct type *t;
1850 struct type *elem;
1851
1852 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1853 TYPE_CODE_UNION);
1854
1855 elem = builtin_type (gdbarch)->builtin_uint8;
1856 append_composite_type_field (t, "u", elem);
1857
1858 elem = builtin_type (gdbarch)->builtin_int8;
1859 append_composite_type_field (t, "s", elem);
1860
1861 tdep->vnb_type = t;
1862 }
1863
1864 return tdep->vnb_type;
1865}
1866
1867/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1868
1869static int
1870aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1871{
1872 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1873 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1874
1875 if (reg == AARCH64_DWARF_SP)
1876 return AARCH64_SP_REGNUM;
1877
1878 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1879 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1880
1881 return -1;
1882}
1883\f
1884
1885/* Implement the "print_insn" gdbarch method. */
1886
1887static int
1888aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1889{
1890 info->symbols = NULL;
1891 return print_insn_aarch64 (memaddr, info);
1892}
1893
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1898
1899/* Implement the "breakpoint_from_pc" gdbarch method. */
1900
948f8e3d 1901static const gdb_byte *
07b287a0
MS
1902aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1903 int *lenptr)
1904{
1905 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1906
1907 *lenptr = sizeof (aarch64_default_breakpoint);
1908 return aarch64_default_breakpoint;
1909}
1910
1911/* Extract from an array REGS containing the (raw) register state a
1912 function return value of type TYPE, and copy that, in virtual
1913 format, into VALBUF. */
1914
1915static void
1916aarch64_extract_return_value (struct type *type, struct regcache *regs,
1917 gdb_byte *valbuf)
1918{
1919 struct gdbarch *gdbarch = get_regcache_arch (regs);
1920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1921
1922 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1923 {
1924 bfd_byte buf[V_REGISTER_SIZE];
1925 int len = TYPE_LENGTH (type);
1926
1927 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1928 memcpy (valbuf, buf, len);
1929 }
1930 else if (TYPE_CODE (type) == TYPE_CODE_INT
1931 || TYPE_CODE (type) == TYPE_CODE_CHAR
1932 || TYPE_CODE (type) == TYPE_CODE_BOOL
1933 || TYPE_CODE (type) == TYPE_CODE_PTR
1934 || TYPE_CODE (type) == TYPE_CODE_REF
1935 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1936 {
1937 /* If the the type is a plain integer, then the access is
1938 straight-forward. Otherwise we have to play around a bit
1939 more. */
1940 int len = TYPE_LENGTH (type);
1941 int regno = AARCH64_X0_REGNUM;
1942 ULONGEST tmp;
1943
1944 while (len > 0)
1945 {
1946 /* By using store_unsigned_integer we avoid having to do
1947 anything special for small big-endian values. */
1948 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1949 store_unsigned_integer (valbuf,
1950 (len > X_REGISTER_SIZE
1951 ? X_REGISTER_SIZE : len), byte_order, tmp);
1952 len -= X_REGISTER_SIZE;
1953 valbuf += X_REGISTER_SIZE;
1954 }
1955 }
1956 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1957 {
1958 int regno = AARCH64_V0_REGNUM;
1959 bfd_byte buf[V_REGISTER_SIZE];
1960 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1961 int len = TYPE_LENGTH (target_type);
1962
1963 regcache_cooked_read (regs, regno, buf);
1964 memcpy (valbuf, buf, len);
1965 valbuf += len;
1966 regcache_cooked_read (regs, regno + 1, buf);
1967 memcpy (valbuf, buf, len);
1968 valbuf += len;
1969 }
1970 else if (is_hfa (type))
1971 {
1972 int elements = TYPE_NFIELDS (type);
1973 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1974 int len = TYPE_LENGTH (member_type);
1975 int i;
1976
1977 for (i = 0; i < elements; i++)
1978 {
1979 int regno = AARCH64_V0_REGNUM + i;
1980 bfd_byte buf[X_REGISTER_SIZE];
1981
1982 if (aarch64_debug)
1983 fprintf_unfiltered (gdb_stdlog,
1984 "read HFA return value element %d from %s\n",
1985 i + 1,
1986 gdbarch_register_name (gdbarch, regno));
1987 regcache_cooked_read (regs, regno, buf);
1988
1989 memcpy (valbuf, buf, len);
1990 valbuf += len;
1991 }
1992 }
1993 else
1994 {
1995 /* For a structure or union the behaviour is as if the value had
1996 been stored to word-aligned memory and then loaded into
1997 registers with 64-bit load instruction(s). */
1998 int len = TYPE_LENGTH (type);
1999 int regno = AARCH64_X0_REGNUM;
2000 bfd_byte buf[X_REGISTER_SIZE];
2001
2002 while (len > 0)
2003 {
2004 regcache_cooked_read (regs, regno++, buf);
2005 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2006 len -= X_REGISTER_SIZE;
2007 valbuf += X_REGISTER_SIZE;
2008 }
2009 }
2010}
2011
2012
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */
  return 0;
}
2046
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating point values go back in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* NOTE(review): only the first LEN bytes of TMPBUF are
	     initialized before the whole buffer is written to a
	     full-width register, so the high lanes receive
	     indeterminate bytes -- confirm whether they should be
	     zeroed.  */
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2140
2141/* Implement the "return_value" gdbarch method. */
2142
2143static enum return_value_convention
2144aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2145 struct type *valtype, struct regcache *regcache,
2146 gdb_byte *readbuf, const gdb_byte *writebuf)
2147{
2148 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2149
2150 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2151 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2152 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2153 {
2154 if (aarch64_return_in_memory (gdbarch, valtype))
2155 {
2156 if (aarch64_debug)
2157 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2158 return RETURN_VALUE_STRUCT_CONVENTION;
2159 }
2160 }
2161
2162 if (writebuf)
2163 aarch64_store_return_value (valtype, regcache, writebuf);
2164
2165 if (readbuf)
2166 aarch64_extract_return_value (valtype, regcache, readbuf);
2167
2168 if (aarch64_debug)
2169 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2170
2171 return RETURN_VALUE_REGISTER_CONVENTION;
2172}
2173
/* Implement the "get_longjmp_target" gdbarch method.

   Determine the address that a longjmp executing in FRAME will resume
   at, by reading the saved PC slot out of the jmp_buf whose address
   is in x0 on entry to longjmp.  Returns 1 and sets *PC on success,
   0 if the jmp_buf memory could not be read.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* The jmp_buf pointer is the first argument, i.e. in x0.  */
  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  /* jb_pc (slot index) and jb_elt_size are filled in by the OS ABI
     handler; index into the jmp_buf to reach the saved PC.  */
  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
2194\f
2195
2196/* Return the pseudo register name corresponding to register regnum. */
2197
2198static const char *
2199aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2200{
2201 static const char *const q_name[] =
2202 {
2203 "q0", "q1", "q2", "q3",
2204 "q4", "q5", "q6", "q7",
2205 "q8", "q9", "q10", "q11",
2206 "q12", "q13", "q14", "q15",
2207 "q16", "q17", "q18", "q19",
2208 "q20", "q21", "q22", "q23",
2209 "q24", "q25", "q26", "q27",
2210 "q28", "q29", "q30", "q31",
2211 };
2212
2213 static const char *const d_name[] =
2214 {
2215 "d0", "d1", "d2", "d3",
2216 "d4", "d5", "d6", "d7",
2217 "d8", "d9", "d10", "d11",
2218 "d12", "d13", "d14", "d15",
2219 "d16", "d17", "d18", "d19",
2220 "d20", "d21", "d22", "d23",
2221 "d24", "d25", "d26", "d27",
2222 "d28", "d29", "d30", "d31",
2223 };
2224
2225 static const char *const s_name[] =
2226 {
2227 "s0", "s1", "s2", "s3",
2228 "s4", "s5", "s6", "s7",
2229 "s8", "s9", "s10", "s11",
2230 "s12", "s13", "s14", "s15",
2231 "s16", "s17", "s18", "s19",
2232 "s20", "s21", "s22", "s23",
2233 "s24", "s25", "s26", "s27",
2234 "s28", "s29", "s30", "s31",
2235 };
2236
2237 static const char *const h_name[] =
2238 {
2239 "h0", "h1", "h2", "h3",
2240 "h4", "h5", "h6", "h7",
2241 "h8", "h9", "h10", "h11",
2242 "h12", "h13", "h14", "h15",
2243 "h16", "h17", "h18", "h19",
2244 "h20", "h21", "h22", "h23",
2245 "h24", "h25", "h26", "h27",
2246 "h28", "h29", "h30", "h31",
2247 };
2248
2249 static const char *const b_name[] =
2250 {
2251 "b0", "b1", "b2", "b3",
2252 "b4", "b5", "b6", "b7",
2253 "b8", "b9", "b10", "b11",
2254 "b12", "b13", "b14", "b15",
2255 "b16", "b17", "b18", "b19",
2256 "b20", "b21", "b22", "b23",
2257 "b24", "b25", "b26", "b27",
2258 "b28", "b29", "b30", "b31",
2259 };
2260
2261 regnum -= gdbarch_num_regs (gdbarch);
2262
2263 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2264 return q_name[regnum - AARCH64_Q0_REGNUM];
2265
2266 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2267 return d_name[regnum - AARCH64_D0_REGNUM];
2268
2269 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2270 return s_name[regnum - AARCH64_S0_REGNUM];
2271
2272 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2273 return h_name[regnum - AARCH64_H0_REGNUM];
2274
2275 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2276 return b_name[regnum - AARCH64_B0_REGNUM];
2277
2278 internal_error (__FILE__, __LINE__,
2279 _("aarch64_pseudo_register_name: bad register number %d"),
2280 regnum);
2281}
2282
2283/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2284
2285static struct type *
2286aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2287{
2288 regnum -= gdbarch_num_regs (gdbarch);
2289
2290 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2291 return aarch64_vnq_type (gdbarch);
2292
2293 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2294 return aarch64_vnd_type (gdbarch);
2295
2296 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2297 return aarch64_vns_type (gdbarch);
2298
2299 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2300 return aarch64_vnh_type (gdbarch);
2301
2302 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2303 return aarch64_vnb_type (gdbarch);
2304
2305 internal_error (__FILE__, __LINE__,
2306 _("aarch64_pseudo_register_type: bad register number %d"),
2307 regnum);
2308}
2309
2310/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2311
2312static int
2313aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2314 struct reggroup *group)
2315{
2316 regnum -= gdbarch_num_regs (gdbarch);
2317
2318 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2319 return group == all_reggroup || group == vector_reggroup;
2320 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2321 return (group == all_reggroup || group == vector_reggroup
2322 || group == float_reggroup);
2323 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2324 return (group == all_reggroup || group == vector_reggroup
2325 || group == float_reggroup);
2326 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2327 return group == all_reggroup || group == vector_reggroup;
2328 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2329 return group == all_reggroup || group == vector_reggroup;
2330
2331 return group == all_reggroup;
2332}
2333
2334/* Implement the "pseudo_register_read_value" gdbarch method. */
2335
2336static struct value *
2337aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2338 struct regcache *regcache,
2339 int regnum)
2340{
2341 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2342 struct value *result_value;
2343 gdb_byte *buf;
2344
2345 result_value = allocate_value (register_type (gdbarch, regnum));
2346 VALUE_LVAL (result_value) = lval_register;
2347 VALUE_REGNUM (result_value) = regnum;
2348 buf = value_contents_raw (result_value);
2349
2350 regnum -= gdbarch_num_regs (gdbarch);
2351
2352 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2353 {
2354 enum register_status status;
2355 unsigned v_regnum;
2356
2357 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2358 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2359 if (status != REG_VALID)
2360 mark_value_bytes_unavailable (result_value, 0,
2361 TYPE_LENGTH (value_type (result_value)));
2362 else
2363 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2364 return result_value;
2365 }
2366
2367 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2368 {
2369 enum register_status status;
2370 unsigned v_regnum;
2371
2372 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2373 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2374 if (status != REG_VALID)
2375 mark_value_bytes_unavailable (result_value, 0,
2376 TYPE_LENGTH (value_type (result_value)));
2377 else
2378 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2379 return result_value;
2380 }
2381
2382 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2383 {
2384 enum register_status status;
2385 unsigned v_regnum;
2386
2387 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2388 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2389 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2390 return result_value;
2391 }
2392
2393 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2394 {
2395 enum register_status status;
2396 unsigned v_regnum;
2397
2398 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2399 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2400 if (status != REG_VALID)
2401 mark_value_bytes_unavailable (result_value, 0,
2402 TYPE_LENGTH (value_type (result_value)));
2403 else
2404 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2405 return result_value;
2406 }
2407
2408 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2409 {
2410 enum register_status status;
2411 unsigned v_regnum;
2412
2413 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2414 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2415 if (status != REG_VALID)
2416 mark_value_bytes_unavailable (result_value, 0,
2417 TYPE_LENGTH (value_type (result_value)));
2418 else
2419 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2420 return result_value;
2421 }
2422
2423 gdb_assert_not_reached ("regnum out of bound");
2424}
2425
2426/* Implement the "pseudo_register_write" gdbarch method. */
2427
2428static void
2429aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2430 int regnum, const gdb_byte *buf)
2431{
2432 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2433
2434 /* Ensure the register buffer is zero, we want gdb writes of the
2435 various 'scalar' pseudo registers to behavior like architectural
2436 writes, register width bytes are written the remainder are set to
2437 zero. */
2438 memset (reg_buf, 0, sizeof (reg_buf));
2439
2440 regnum -= gdbarch_num_regs (gdbarch);
2441
2442 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2443 {
2444 /* pseudo Q registers */
2445 unsigned v_regnum;
2446
2447 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2448 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2449 regcache_raw_write (regcache, v_regnum, reg_buf);
2450 return;
2451 }
2452
2453 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2454 {
2455 /* pseudo D registers */
2456 unsigned v_regnum;
2457
2458 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2459 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2460 regcache_raw_write (regcache, v_regnum, reg_buf);
2461 return;
2462 }
2463
2464 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2465 {
2466 unsigned v_regnum;
2467
2468 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2469 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2470 regcache_raw_write (regcache, v_regnum, reg_buf);
2471 return;
2472 }
2473
2474 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2475 {
2476 /* pseudo H registers */
2477 unsigned v_regnum;
2478
2479 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2480 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2481 regcache_raw_write (regcache, v_regnum, reg_buf);
2482 return;
2483 }
2484
2485 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2486 {
2487 /* pseudo B registers */
2488 unsigned v_regnum;
2489
2490 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2491 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2492 regcache_raw_write (regcache, v_regnum, reg_buf);
2493 return;
2494 }
2495
2496 gdb_assert_not_reached ("regnum out of bound");
2497}
2498
07b287a0
MS
/* Callback function for user_reg_add.

   BATON points at the register number of the alias; return its value
   in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;

  return value_of_register (*regnum_p, frame);
}
2508\f
2509
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive / store-exclusive sequence must run without being
   interrupted, so instead of stepping each instruction we scan
   forward from PC: if the current instruction is a load-exclusive,
   find the matching store-exclusive within a bounded window and
   place a breakpoint just past it (plus one at the target of any
   single conditional branch inside the sequence), then let the
   inferior run.  Returns 1 if breakpoints were inserted, 0 to fall
   back to normal single-stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan up to atomic_sequence_length following instructions for the
     closing Store Exclusive.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch in the window: give up
	     rather than guess at control flow.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2587
07b287a0
MS
2588/* Initialize the current architecture based on INFO. If possible,
2589 re-use an architecture from ARCHES, which is a list of
2590 architectures already created during this debugging session.
2591
2592 Called e.g. at program startup, when reading a core file, and when
2593 reading a binary file. */
2594
2595static struct gdbarch *
2596aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2597{
2598 struct gdbarch_tdep *tdep;
2599 struct gdbarch *gdbarch;
2600 struct gdbarch_list *best_arch;
2601 struct tdesc_arch_data *tdesc_data = NULL;
2602 const struct target_desc *tdesc = info.target_desc;
2603 int i;
2604 int have_fpa_registers = 1;
2605 int valid_p = 1;
2606 const struct tdesc_feature *feature;
2607 int num_regs = 0;
2608 int num_pseudo_regs = 0;
2609
2610 /* Ensure we always have a target descriptor. */
2611 if (!tdesc_has_registers (tdesc))
2612 tdesc = tdesc_aarch64;
2613
2614 gdb_assert (tdesc);
2615
2616 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2617
2618 if (feature == NULL)
2619 return NULL;
2620
2621 tdesc_data = tdesc_data_alloc ();
2622
2623 /* Validate the descriptor provides the mandatory core R registers
2624 and allocate their numbers. */
2625 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2626 valid_p &=
2627 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2628 aarch64_r_register_names[i]);
2629
2630 num_regs = AARCH64_X0_REGNUM + i;
2631
2632 /* Look for the V registers. */
2633 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2634 if (feature)
2635 {
2636 /* Validate the descriptor provides the mandatory V registers
2637 and allocate their numbers. */
2638 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2639 valid_p &=
2640 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2641 aarch64_v_register_names[i]);
2642
2643 num_regs = AARCH64_V0_REGNUM + i;
2644
2645 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2646 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2647 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2648 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2649 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2650 }
2651
2652 if (!valid_p)
2653 {
2654 tdesc_data_cleanup (tdesc_data);
2655 return NULL;
2656 }
2657
2658 /* AArch64 code is always little-endian. */
2659 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2660
2661 /* If there is already a candidate, use it. */
2662 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2663 best_arch != NULL;
2664 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2665 {
2666 /* Found a match. */
2667 break;
2668 }
2669
2670 if (best_arch != NULL)
2671 {
2672 if (tdesc_data != NULL)
2673 tdesc_data_cleanup (tdesc_data);
2674 return best_arch->gdbarch;
2675 }
2676
2677 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2678 gdbarch = gdbarch_alloc (&info, tdep);
2679
2680 /* This should be low enough for everything. */
2681 tdep->lowest_pc = 0x20;
2682 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2683 tdep->jb_elt_size = 8;
2684
2685 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2686 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2687
07b287a0
MS
2688 /* Frame handling. */
2689 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2690 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2691 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2692
2693 /* Advance PC across function entry code. */
2694 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2695
2696 /* The stack grows downward. */
2697 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2698
2699 /* Breakpoint manipulation. */
2700 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
07b287a0 2701 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2702 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
2703
2704 /* Information about registers, etc. */
2705 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2706 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2707 set_gdbarch_num_regs (gdbarch, num_regs);
2708
2709 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2710 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2711 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2712 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2713 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2714 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2715 aarch64_pseudo_register_reggroup_p);
2716
2717 /* ABI */
2718 set_gdbarch_short_bit (gdbarch, 16);
2719 set_gdbarch_int_bit (gdbarch, 32);
2720 set_gdbarch_float_bit (gdbarch, 32);
2721 set_gdbarch_double_bit (gdbarch, 64);
2722 set_gdbarch_long_double_bit (gdbarch, 128);
2723 set_gdbarch_long_bit (gdbarch, 64);
2724 set_gdbarch_long_long_bit (gdbarch, 64);
2725 set_gdbarch_ptr_bit (gdbarch, 64);
2726 set_gdbarch_char_signed (gdbarch, 0);
2727 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2728 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2729 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2730
2731 /* Internal <-> external register number maps. */
2732 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2733
2734 /* Returning results. */
2735 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2736
2737 /* Disassembly. */
2738 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2739
2740 /* Virtual tables. */
2741 set_gdbarch_vbit_in_delta (gdbarch, 1);
2742
2743 /* Hook in the ABI-specific overrides, if they have been registered. */
2744 info.target_desc = tdesc;
2745 info.tdep_info = (void *) tdesc_data;
2746 gdbarch_init_osabi (info, gdbarch);
2747
2748 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2749
2750 /* Add some default predicates. */
2751 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2752 dwarf2_append_unwinders (gdbarch);
2753 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2754
2755 frame_base_set_default (gdbarch, &aarch64_normal_base);
2756
2757 /* Now we have tuned the configuration, set a few final things,
2758 based on what the OS ABI has told us. */
2759
2760 if (tdep->jb_pc >= 0)
2761 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2762
2763 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2764
2765 /* Add standard register aliases. */
2766 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2767 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2768 value_of_aarch64_user_reg,
2769 &aarch64_register_aliases[i].regnum);
2770
2771 return gdbarch;
2772}
2773
2774static void
2775aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2776{
2777 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2778
2779 if (tdep == NULL)
2780 return;
2781
2782 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2783 paddress (gdbarch, tdep->lowest_pc));
2784}
2785
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 architecture with the
   gdbarch framework, load the builtin target description, and install
   the "set/show debug aarch64" maintenance commands backed by the
   aarch64_debug flag used throughout this file.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Builtin fallback description, used when the target provides
     none.  */
  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2806
/* AArch64 process record-replay related structures, defines etc.  */

/* Mask of the low (X + 1) bits.  NOTE(review): uses 1L, so on an
   ILP32 host this overflows for x >= 31 — confirm callers only pass
   small field widths.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Extract bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Extract bits ST..FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate REGS (an array of LENGTH uint32_t register numbers) and
   fill it from RECORD_BUF.  No-op when LENGTH is zero.  The caller
   owns and frees the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS (an array of LENGTH aarch64_mem_r records) and fill
   it from RECORD_BUF.  NOTE(review): copies raw uint64_t pairs onto
   &MEMS->len — relies on struct aarch64_mem_r being exactly
   {len, addr} with no padding; confirm.  Caller owns the
   allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)

/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: LEN bytes at ADDR.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-instruction record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Working state for recording a single instruction: the decoded
   insn, plus the registers and memory ranges it will modify.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2865
/* Record handler for data processing - register instructions.

   Decode the instruction in AARCH64_INSN_R and record the registers
   it will modify: the destination register Rd, plus CPSR for the
   flag-setting variants.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for encodings this decoder does not
   recognize.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  /* Bit 28 clear: logical / add-subtract (shifted register).  */
  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* CConditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
2941
2942/* Record handler for data processing - immediate instructions. */
2943
2944static unsigned int
2945aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2946{
2947 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2948 uint32_t record_buf[4];
2949
2950 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2951 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2952 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2953 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2954
2955 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2956 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2957 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2958 {
2959 record_buf[0] = reg_rd;
2960 aarch64_insn_r->reg_rec_count = 1;
2961 }
2962 else if (insn_bits24_27 == 0x01)
2963 {
2964 /* Add/Subtract (immediate). */
2965 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2966 record_buf[0] = reg_rd;
2967 aarch64_insn_r->reg_rec_count = 1;
2968 if (setflags)
2969 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2970 }
2971 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2972 {
2973 /* Logical (immediate). */
2974 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2975 record_buf[0] = reg_rd;
2976 aarch64_insn_r->reg_rec_count = 1;
2977 if (setflags)
2978 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2979 }
2980 else
2981 return AARCH64_RECORD_UNKNOWN;
2982
2983 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2984 record_buf);
2985 return AARCH64_RECORD_SUCCESS;
2986}
2987
/* Record handler for branch, exception generation and system
   instructions.

   Records PC for all branch forms, LR additionally for branch-with-
   link forms, Rt or CPSR for system register accesses, and defers
   SVC instructions to the OS ABI's syscall record hook.  Returns one
   of the aarch64_record_result codes.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC is handled; other exception-generating forms
	     (HVC, SMC, BRK, HLT, ...) are unsupported.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Raw register 8 — presumably x8, the Linux syscall-
		 number register; confirm against the core regnum
		 layout.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3067
3068/* Record handler for advanced SIMD load and store instructions. */
3069
3070static unsigned int
3071aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3072{
3073 CORE_ADDR address;
3074 uint64_t addr_offset = 0;
3075 uint32_t record_buf[24];
3076 uint64_t record_buf_mem[24];
3077 uint32_t reg_rn, reg_rt;
3078 uint32_t reg_index = 0, mem_index = 0;
3079 uint8_t opcode_bits, size_bits;
3080
3081 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3082 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3083 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3084 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3085 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3086
3087 if (record_debug)
3088 {
3089 fprintf_unfiltered (gdb_stdlog,
3090 "Process record: Advanced SIMD load/store\n");
3091 }
3092
3093 /* Load/store single structure. */
3094 if (bit (aarch64_insn_r->aarch64_insn, 24))
3095 {
3096 uint8_t sindex, scale, selem, esize, replicate = 0;
3097 scale = opcode_bits >> 2;
3098 selem = ((opcode_bits & 0x02) |
3099 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3100 switch (scale)
3101 {
3102 case 1:
3103 if (size_bits & 0x01)
3104 return AARCH64_RECORD_UNKNOWN;
3105 break;
3106 case 2:
3107 if ((size_bits >> 1) & 0x01)
3108 return AARCH64_RECORD_UNKNOWN;
3109 if (size_bits & 0x01)
3110 {
3111 if (!((opcode_bits >> 1) & 0x01))
3112 scale = 3;
3113 else
3114 return AARCH64_RECORD_UNKNOWN;
3115 }
3116 break;
3117 case 3:
3118 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3119 {
3120 scale = size_bits;
3121 replicate = 1;
3122 break;
3123 }
3124 else
3125 return AARCH64_RECORD_UNKNOWN;
3126 default:
3127 break;
3128 }
3129 esize = 8 << scale;
3130 if (replicate)
3131 for (sindex = 0; sindex < selem; sindex++)
3132 {
3133 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3134 reg_rt = (reg_rt + 1) % 32;
3135 }
3136 else
3137 {
3138 for (sindex = 0; sindex < selem; sindex++)
3139 if (bit (aarch64_insn_r->aarch64_insn, 22))
3140 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3141 else
3142 {
3143 record_buf_mem[mem_index++] = esize / 8;
3144 record_buf_mem[mem_index++] = address + addr_offset;
3145 }
3146 addr_offset = addr_offset + (esize / 8);
3147 reg_rt = (reg_rt + 1) % 32;
3148 }
3149 }
3150 /* Load/store multiple structure. */
3151 else
3152 {
3153 uint8_t selem, esize, rpt, elements;
3154 uint8_t eindex, rindex;
3155
3156 esize = 8 << size_bits;
3157 if (bit (aarch64_insn_r->aarch64_insn, 30))
3158 elements = 128 / esize;
3159 else
3160 elements = 64 / esize;
3161
3162 switch (opcode_bits)
3163 {
3164 /*LD/ST4 (4 Registers). */
3165 case 0:
3166 rpt = 1;
3167 selem = 4;
3168 break;
3169 /*LD/ST1 (4 Registers). */
3170 case 2:
3171 rpt = 4;
3172 selem = 1;
3173 break;
3174 /*LD/ST3 (3 Registers). */
3175 case 4:
3176 rpt = 1;
3177 selem = 3;
3178 break;
3179 /*LD/ST1 (3 Registers). */
3180 case 6:
3181 rpt = 3;
3182 selem = 1;
3183 break;
3184 /*LD/ST1 (1 Register). */
3185 case 7:
3186 rpt = 1;
3187 selem = 1;
3188 break;
3189 /*LD/ST2 (2 Registers). */
3190 case 8:
3191 rpt = 1;
3192 selem = 2;
3193 break;
3194 /*LD/ST1 (2 Registers). */
3195 case 10:
3196 rpt = 2;
3197 selem = 1;
3198 break;
3199 default:
3200 return AARCH64_RECORD_UNSUPPORTED;
3201 break;
3202 }
3203 for (rindex = 0; rindex < rpt; rindex++)
3204 for (eindex = 0; eindex < elements; eindex++)
3205 {
3206 uint8_t reg_tt, sindex;
3207 reg_tt = (reg_rt + rindex) % 32;
3208 for (sindex = 0; sindex < selem; sindex++)
3209 {
3210 if (bit (aarch64_insn_r->aarch64_insn, 22))
3211 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3212 else
3213 {
3214 record_buf_mem[mem_index++] = esize / 8;
3215 record_buf_mem[mem_index++] = address + addr_offset;
3216 }
3217 addr_offset = addr_offset + (esize / 8);
3218 reg_tt = (reg_tt + 1) % 32;
3219 }
3220 }
3221 }
3222
3223 if (bit (aarch64_insn_r->aarch64_insn, 23))
3224 record_buf[reg_index++] = reg_rn;
3225
3226 aarch64_insn_r->reg_rec_count = reg_index;
3227 aarch64_insn_r->mem_rec_count = mem_index / 2;
3228 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3229 record_buf_mem);
3230 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3231 record_buf);
3232 return AARCH64_RECORD_SUCCESS;
3233}
3234
3235/* Record handler for load and store instructions. */
3236
3237static unsigned int
3238aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3239{
3240 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3241 uint8_t insn_bit23, insn_bit21;
3242 uint8_t opc, size_bits, ld_flag, vector_flag;
3243 uint32_t reg_rn, reg_rt, reg_rt2;
3244 uint64_t datasize, offset;
3245 uint32_t record_buf[8];
3246 uint64_t record_buf_mem[8];
3247 CORE_ADDR address;
3248
3249 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3250 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3251 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3252 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3253 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3254 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3255 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3256 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3257 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3258 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3259 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3260
3261 /* Load/store exclusive. */
3262 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3263 {
3264 if (record_debug)
3265 {
3266 fprintf_unfiltered (gdb_stdlog,
3267 "Process record: load/store exclusive\n");
3268 }
3269
3270 if (ld_flag)
3271 {
3272 record_buf[0] = reg_rt;
3273 aarch64_insn_r->reg_rec_count = 1;
3274 if (insn_bit21)
3275 {
3276 record_buf[1] = reg_rt2;
3277 aarch64_insn_r->reg_rec_count = 2;
3278 }
3279 }
3280 else
3281 {
3282 if (insn_bit21)
3283 datasize = (8 << size_bits) * 2;
3284 else
3285 datasize = (8 << size_bits);
3286 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3287 &address);
3288 record_buf_mem[0] = datasize / 8;
3289 record_buf_mem[1] = address;
3290 aarch64_insn_r->mem_rec_count = 1;
3291 if (!insn_bit23)
3292 {
3293 /* Save register rs. */
3294 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3295 aarch64_insn_r->reg_rec_count = 1;
3296 }
3297 }
3298 }
3299 /* Load register (literal) instructions decoding. */
3300 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3301 {
3302 if (record_debug)
3303 {
3304 fprintf_unfiltered (gdb_stdlog,
3305 "Process record: load register (literal)\n");
3306 }
3307 if (vector_flag)
3308 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3309 else
3310 record_buf[0] = reg_rt;
3311 aarch64_insn_r->reg_rec_count = 1;
3312 }
3313 /* All types of load/store pair instructions decoding. */
3314 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3315 {
3316 if (record_debug)
3317 {
3318 fprintf_unfiltered (gdb_stdlog,
3319 "Process record: load/store pair\n");
3320 }
3321
3322 if (ld_flag)
3323 {
3324 if (vector_flag)
3325 {
3326 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3327 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3328 }
3329 else
3330 {
3331 record_buf[0] = reg_rt;
3332 record_buf[1] = reg_rt2;
3333 }
3334 aarch64_insn_r->reg_rec_count = 2;
3335 }
3336 else
3337 {
3338 uint16_t imm7_off;
3339 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3340 if (!vector_flag)
3341 size_bits = size_bits >> 1;
3342 datasize = 8 << (2 + size_bits);
3343 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3344 offset = offset << (2 + size_bits);
3345 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3346 &address);
3347 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3348 {
3349 if (imm7_off & 0x40)
3350 address = address - offset;
3351 else
3352 address = address + offset;
3353 }
3354
3355 record_buf_mem[0] = datasize / 8;
3356 record_buf_mem[1] = address;
3357 record_buf_mem[2] = datasize / 8;
3358 record_buf_mem[3] = address + (datasize / 8);
3359 aarch64_insn_r->mem_rec_count = 2;
3360 }
3361 if (bit (aarch64_insn_r->aarch64_insn, 23))
3362 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3363 }
3364 /* Load/store register (unsigned immediate) instructions. */
3365 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3366 {
3367 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3368 if (!(opc >> 1))
3369 if (opc & 0x01)
3370 ld_flag = 0x01;
3371 else
3372 ld_flag = 0x0;
3373 else
3374 if (size_bits != 0x03)
3375 ld_flag = 0x01;
3376 else
3377 return AARCH64_RECORD_UNKNOWN;
3378
3379 if (record_debug)
3380 {
3381 fprintf_unfiltered (gdb_stdlog,
3382 "Process record: load/store (unsigned immediate):"
3383 " size %x V %d opc %x\n", size_bits, vector_flag,
3384 opc);
3385 }
3386
3387 if (!ld_flag)
3388 {
3389 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3390 datasize = 8 << size_bits;
3391 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3392 &address);
3393 offset = offset << size_bits;
3394 address = address + offset;
3395
3396 record_buf_mem[0] = datasize >> 3;
3397 record_buf_mem[1] = address;
3398 aarch64_insn_r->mem_rec_count = 1;
3399 }
3400 else
3401 {
3402 if (vector_flag)
3403 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3404 else
3405 record_buf[0] = reg_rt;
3406 aarch64_insn_r->reg_rec_count = 1;
3407 }
3408 }
3409 /* Load/store register (register offset) instructions. */
5d98d3cd
YQ
3410 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3411 && insn_bits10_11 == 0x02 && insn_bit21)
99afc88b
OJ
3412 {
3413 if (record_debug)
3414 {
3415 fprintf_unfiltered (gdb_stdlog,
3416 "Process record: load/store (register offset)\n");
3417 }
3418 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3419 if (!(opc >> 1))
3420 if (opc & 0x01)
3421 ld_flag = 0x01;
3422 else
3423 ld_flag = 0x0;
3424 else
3425 if (size_bits != 0x03)
3426 ld_flag = 0x01;
3427 else
3428 return AARCH64_RECORD_UNKNOWN;
3429
3430 if (!ld_flag)
3431 {
3432 uint64_t reg_rm_val;
3433 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3434 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3435 if (bit (aarch64_insn_r->aarch64_insn, 12))
3436 offset = reg_rm_val << size_bits;
3437 else
3438 offset = reg_rm_val;
3439 datasize = 8 << size_bits;
3440 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3441 &address);
3442 address = address + offset;
3443 record_buf_mem[0] = datasize >> 3;
3444 record_buf_mem[1] = address;
3445 aarch64_insn_r->mem_rec_count = 1;
3446 }
3447 else
3448 {
3449 if (vector_flag)
3450 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3451 else
3452 record_buf[0] = reg_rt;
3453 aarch64_insn_r->reg_rec_count = 1;
3454 }
3455 }
3456 /* Load/store register (immediate and unprivileged) instructions. */
5d98d3cd
YQ
3457 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3458 && !insn_bit21)
99afc88b
OJ
3459 {
3460 if (record_debug)
3461 {
3462 fprintf_unfiltered (gdb_stdlog,
3463 "Process record: load/store (immediate and unprivileged)\n");
3464 }
3465 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3466 if (!(opc >> 1))
3467 if (opc & 0x01)
3468 ld_flag = 0x01;
3469 else
3470 ld_flag = 0x0;
3471 else
3472 if (size_bits != 0x03)
3473 ld_flag = 0x01;
3474 else
3475 return AARCH64_RECORD_UNKNOWN;
3476
3477 if (!ld_flag)
3478 {
3479 uint16_t imm9_off;
3480 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3481 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3482 datasize = 8 << size_bits;
3483 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3484 &address);
3485 if (insn_bits10_11 != 0x01)
3486 {
3487 if (imm9_off & 0x0100)
3488 address = address - offset;
3489 else
3490 address = address + offset;
3491 }
3492 record_buf_mem[0] = datasize >> 3;
3493 record_buf_mem[1] = address;
3494 aarch64_insn_r->mem_rec_count = 1;
3495 }
3496 else
3497 {
3498 if (vector_flag)
3499 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3500 else
3501 record_buf[0] = reg_rt;
3502 aarch64_insn_r->reg_rec_count = 1;
3503 }
3504 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3505 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3506 }
3507 /* Advanced SIMD load/store instructions. */
3508 else
3509 return aarch64_record_asimd_load_store (aarch64_insn_r);
3510
3511 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3512 record_buf_mem);
3513 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3514 record_buf);
3515 return AARCH64_RECORD_SUCCESS;
3516}
3517
3518/* Record handler for data processing SIMD and floating point instructions. */
3519
3520static unsigned int
3521aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3522{
3523 uint8_t insn_bit21, opcode, rmode, reg_rd;
3524 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3525 uint8_t insn_bits11_14;
3526 uint32_t record_buf[2];
3527
3528 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3529 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3530 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3531 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3532 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3533 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3534 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3535 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3536 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3537
3538 if (record_debug)
3539 {
3540 fprintf_unfiltered (gdb_stdlog,
3541 "Process record: data processing SIMD/FP: ");
3542 }
3543
3544 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3545 {
3546 /* Floating point - fixed point conversion instructions. */
3547 if (!insn_bit21)
3548 {
3549 if (record_debug)
3550 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3551
3552 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3553 record_buf[0] = reg_rd;
3554 else
3555 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3556 }
3557 /* Floating point - conditional compare instructions. */
3558 else if (insn_bits10_11 == 0x01)
3559 {
3560 if (record_debug)
3561 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3562
3563 record_buf[0] = AARCH64_CPSR_REGNUM;
3564 }
3565 /* Floating point - data processing (2-source) and
3566 conditional select instructions. */
3567 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3568 {
3569 if (record_debug)
3570 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3571
3572 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3573 }
3574 else if (insn_bits10_11 == 0x00)
3575 {
3576 /* Floating point - immediate instructions. */
3577 if ((insn_bits12_15 & 0x01) == 0x01
3578 || (insn_bits12_15 & 0x07) == 0x04)
3579 {
3580 if (record_debug)
3581 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3582 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3583 }
3584 /* Floating point - compare instructions. */
3585 else if ((insn_bits12_15 & 0x03) == 0x02)
3586 {
3587 if (record_debug)
3588 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3589 record_buf[0] = AARCH64_CPSR_REGNUM;
3590 }
3591 /* Floating point - integer conversions instructions. */
f62fce35 3592 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3593 {
3594 /* Convert float to integer instruction. */
3595 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3596 {
3597 if (record_debug)
3598 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3599
3600 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3601 }
3602 /* Convert integer to float instruction. */
3603 else if ((opcode >> 1) == 0x01 && !rmode)
3604 {
3605 if (record_debug)
3606 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3607
3608 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3609 }
3610 /* Move float to integer instruction. */
3611 else if ((opcode >> 1) == 0x03)
3612 {
3613 if (record_debug)
3614 fprintf_unfiltered (gdb_stdlog, "move float to int");
3615
3616 if (!(opcode & 0x01))
3617 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3618 else
3619 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3620 }
f62fce35
YQ
3621 else
3622 return AARCH64_RECORD_UNKNOWN;
99afc88b 3623 }
f62fce35
YQ
3624 else
3625 return AARCH64_RECORD_UNKNOWN;
99afc88b 3626 }
f62fce35
YQ
3627 else
3628 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3629 }
3630 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3631 {
3632 if (record_debug)
3633 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3634
3635 /* Advanced SIMD copy instructions. */
3636 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3637 && !bit (aarch64_insn_r->aarch64_insn, 15)
3638 && bit (aarch64_insn_r->aarch64_insn, 10))
3639 {
3640 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3641 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3642 else
3643 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3644 }
3645 else
3646 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3647 }
3648 /* All remaining floating point or advanced SIMD instructions. */
3649 else
3650 {
3651 if (record_debug)
3652 fprintf_unfiltered (gdb_stdlog, "all remain");
3653
3654 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3655 }
3656
3657 if (record_debug)
3658 fprintf_unfiltered (gdb_stdlog, "\n");
3659
3660 aarch64_insn_r->reg_rec_count++;
3661 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3662 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3663 record_buf);
3664 return AARCH64_RECORD_SUCCESS;
3665}
3666
3667/* Decodes insns type and invokes its record handler. */
3668
3669static unsigned int
3670aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3671{
3672 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3673
3674 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3675 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3676 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3677 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3678
3679 /* Data processing - immediate instructions. */
3680 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3681 return aarch64_record_data_proc_imm (aarch64_insn_r);
3682
3683 /* Branch, exception generation and system instructions. */
3684 if (ins_bit26 && !ins_bit27 && ins_bit28)
3685 return aarch64_record_branch_except_sys (aarch64_insn_r);
3686
3687 /* Load and store instructions. */
3688 if (!ins_bit25 && ins_bit27)
3689 return aarch64_record_load_store (aarch64_insn_r);
3690
3691 /* Data processing - register instructions. */
3692 if (ins_bit25 && !ins_bit26 && ins_bit27)
3693 return aarch64_record_data_proc_reg (aarch64_insn_r);
3694
3695 /* Data processing - SIMD and floating point instructions. */
3696 if (ins_bit25 && ins_bit26 && ins_bit27)
3697 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3698
3699 return AARCH64_RECORD_UNSUPPORTED;
3700}
3701
3702/* Cleans up local record registers and memory allocations. */
3703
3704static void
3705deallocate_reg_mem (insn_decode_record *record)
3706{
3707 xfree (record->aarch64_regs);
3708 xfree (record->aarch64_mems);
3709}
3710
3711/* Parse the current instruction and record the values of the registers and
3712 memory that will be changed in current instruction to record_arch_list
3713 return -1 if something is wrong. */
3714
3715int
3716aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3717 CORE_ADDR insn_addr)
3718{
3719 uint32_t rec_no = 0;
3720 uint8_t insn_size = 4;
3721 uint32_t ret = 0;
3722 ULONGEST t_bit = 0, insn_id = 0;
3723 gdb_byte buf[insn_size];
3724 insn_decode_record aarch64_record;
3725
3726 memset (&buf[0], 0, insn_size);
3727 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3728 target_read_memory (insn_addr, &buf[0], insn_size);
3729 aarch64_record.aarch64_insn
3730 = (uint32_t) extract_unsigned_integer (&buf[0],
3731 insn_size,
3732 gdbarch_byte_order (gdbarch));
3733 aarch64_record.regcache = regcache;
3734 aarch64_record.this_addr = insn_addr;
3735 aarch64_record.gdbarch = gdbarch;
3736
3737 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3738 if (ret == AARCH64_RECORD_UNSUPPORTED)
3739 {
3740 printf_unfiltered (_("Process record does not support instruction "
3741 "0x%0x at address %s.\n"),
3742 aarch64_record.aarch64_insn,
3743 paddress (gdbarch, insn_addr));
3744 ret = -1;
3745 }
3746
3747 if (0 == ret)
3748 {
3749 /* Record registers. */
3750 record_full_arch_list_add_reg (aarch64_record.regcache,
3751 AARCH64_PC_REGNUM);
3752 /* Always record register CPSR. */
3753 record_full_arch_list_add_reg (aarch64_record.regcache,
3754 AARCH64_CPSR_REGNUM);
3755 if (aarch64_record.aarch64_regs)
3756 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3757 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3758 aarch64_record.aarch64_regs[rec_no]))
3759 ret = -1;
3760
3761 /* Record memories. */
3762 if (aarch64_record.aarch64_mems)
3763 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3764 if (record_full_arch_list_add_mem
3765 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3766 aarch64_record.aarch64_mems[rec_no].len))
3767 ret = -1;
3768
3769 if (record_full_arch_list_add_end ())
3770 ret = -1;
3771 }
3772
3773 deallocate_reg_mem (&aarch64_record);
3774 return ret;
3775}
This page took 0.338737 seconds and 4 git commands to generate.