Add PowerPC64 ld --tls-get-addr-optimize.
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
32d0add0 3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
ea873d8e
PL
45#include "ax.h"
46#include "ax-gdb.h"
07b287a0
MS
47
48#include "aarch64-tdep.h"
49
50#include "elf-bfd.h"
51#include "elf/aarch64.h"
52
07b287a0
MS
53#include "vec.h"
54
99afc88b
OJ
55#include "record.h"
56#include "record-full.h"
57
07b287a0 58#include "features/aarch64.c"
07b287a0
MS
59
60/* Pseudo register base numbers. */
61#define AARCH64_Q0_REGNUM 0
62#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
63#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
64#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
65#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
66
67/* The standard register names, and all the valid aliases for them. */
68static const struct
69{
70 const char *const name;
71 int regnum;
72} aarch64_register_aliases[] =
73{
74 /* 64-bit register names. */
75 {"fp", AARCH64_FP_REGNUM},
76 {"lr", AARCH64_LR_REGNUM},
77 {"sp", AARCH64_SP_REGNUM},
78
79 /* 32-bit register names. */
80 {"w0", AARCH64_X0_REGNUM + 0},
81 {"w1", AARCH64_X0_REGNUM + 1},
82 {"w2", AARCH64_X0_REGNUM + 2},
83 {"w3", AARCH64_X0_REGNUM + 3},
84 {"w4", AARCH64_X0_REGNUM + 4},
85 {"w5", AARCH64_X0_REGNUM + 5},
86 {"w6", AARCH64_X0_REGNUM + 6},
87 {"w7", AARCH64_X0_REGNUM + 7},
88 {"w8", AARCH64_X0_REGNUM + 8},
89 {"w9", AARCH64_X0_REGNUM + 9},
90 {"w10", AARCH64_X0_REGNUM + 10},
91 {"w11", AARCH64_X0_REGNUM + 11},
92 {"w12", AARCH64_X0_REGNUM + 12},
93 {"w13", AARCH64_X0_REGNUM + 13},
94 {"w14", AARCH64_X0_REGNUM + 14},
95 {"w15", AARCH64_X0_REGNUM + 15},
96 {"w16", AARCH64_X0_REGNUM + 16},
97 {"w17", AARCH64_X0_REGNUM + 17},
98 {"w18", AARCH64_X0_REGNUM + 18},
99 {"w19", AARCH64_X0_REGNUM + 19},
100 {"w20", AARCH64_X0_REGNUM + 20},
101 {"w21", AARCH64_X0_REGNUM + 21},
102 {"w22", AARCH64_X0_REGNUM + 22},
103 {"w23", AARCH64_X0_REGNUM + 23},
104 {"w24", AARCH64_X0_REGNUM + 24},
105 {"w25", AARCH64_X0_REGNUM + 25},
106 {"w26", AARCH64_X0_REGNUM + 26},
107 {"w27", AARCH64_X0_REGNUM + 27},
108 {"w28", AARCH64_X0_REGNUM + 28},
109 {"w29", AARCH64_X0_REGNUM + 29},
110 {"w30", AARCH64_X0_REGNUM + 30},
111
112 /* specials */
113 {"ip0", AARCH64_X0_REGNUM + 16},
114 {"ip1", AARCH64_X0_REGNUM + 17}
115};
116
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
132
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
149
150/* AArch64 prologue cache structure. */
151struct aarch64_prologue_cache
152{
db634143
PL
153 /* The program counter at the start of the function. It is used to
154 identify this frame as a prologue frame. */
155 CORE_ADDR func;
156
157 /* The program counter at the time this frame was created; i.e. where
158 this function was called from. It is used to identify this frame as a
159 stub frame. */
160 CORE_ADDR prev_pc;
161
07b287a0
MS
162 /* The stack pointer at the time this frame was created; i.e. the
163 caller's stack pointer when this function was called. It is used
164 to identify this frame. */
165 CORE_ADDR prev_sp;
166
7dfa3edc
PL
167 /* Is the target available to read from? */
168 int available_p;
169
07b287a0
MS
170 /* The frame base for this frame is just prev_sp - frame size.
171 FRAMESIZE is the distance from the frame pointer to the
172 initial stack pointer. */
173 int framesize;
174
175 /* The register used to hold the frame pointer for this frame. */
176 int framereg;
177
178 /* Saved register offsets. */
179 struct trad_frame_saved_reg *saved_regs;
180};
181
/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;

/* Callback for "show debug aarch64": report the current state of the
   AArch64 debug flag to FILE.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
191
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  /* Perform the left shift in the unsigned type: left-shifting a signed
     value so that a set field bit lands in the sign bit is undefined
     behaviour in C.  The conversion to int32_t followed by the
     arithmetic right shift then sign-extends the field.  */
  return (int32_t) (insn << shift_l) >> shift_r;
}
210
211/* Determine if specified bits within an instruction opcode matches a
212 specific pattern.
213
214 INSN is the instruction opcode.
215
216 MASK specifies the bits within the opcode that are to be tested
217 agsinst for a match with PATTERN. */
218
219static int
220decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
221{
222 return (insn & mask) == pattern;
223}
224
225/* Decode an opcode if it represents an immediate ADD or SUB instruction.
226
227 ADDR specifies the address of the opcode.
228 INSN specifies the opcode to test.
229 RD receives the 'rd' field from the decoded instruction.
230 RN receives the 'rn' field from the decoded instruction.
231
232 Return 1 if the opcodes matches and is decoded, otherwise 0. */
233static int
234decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
235 int32_t *imm)
236{
237 if ((insn & 0x9f000000) == 0x91000000)
238 {
239 unsigned shift;
240 unsigned op_is_sub;
241
242 *rd = (insn >> 0) & 0x1f;
243 *rn = (insn >> 5) & 0x1f;
244 *imm = (insn >> 10) & 0xfff;
245 shift = (insn >> 22) & 0x3;
246 op_is_sub = (insn >> 30) & 0x1;
247
248 switch (shift)
249 {
250 case 0:
251 break;
252 case 1:
253 *imm <<= 12;
254 break;
255 default:
256 /* UNDEFINED */
257 return 0;
258 }
259
260 if (op_is_sub)
261 *imm = -*imm;
262
263 if (aarch64_debug)
b277c936
PL
264 {
265 debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
266 core_addr_to_string_nz (addr), insn, *rd, *rn,
267 *imm);
268 }
07b287a0
MS
269 return 1;
270 }
271 return 0;
272}
273
274/* Decode an opcode if it represents an ADRP instruction.
275
276 ADDR specifies the address of the opcode.
277 INSN specifies the opcode to test.
278 RD receives the 'rd' field from the decoded instruction.
279
280 Return 1 if the opcodes matches and is decoded, otherwise 0. */
281
282static int
283decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
284{
285 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
286 {
287 *rd = (insn >> 0) & 0x1f;
288
289 if (aarch64_debug)
b277c936
PL
290 {
291 debug_printf ("decode: 0x%s 0x%x adrp x%u, #?\n",
292 core_addr_to_string_nz (addr), insn, *rd);
293 }
07b287a0
MS
294 return 1;
295 }
296 return 0;
297}
298
299/* Decode an opcode if it represents an branch immediate or branch
300 and link immediate instruction.
301
302 ADDR specifies the address of the opcode.
303 INSN specifies the opcode to test.
0ea6402e 304 IS_BL receives the 'op' bit from the decoded instruction.
07b287a0
MS
305 OFFSET receives the immediate offset from the decoded instruction.
306
307 Return 1 if the opcodes matches and is decoded, otherwise 0. */
308
309static int
0ea6402e 310decode_b (CORE_ADDR addr, uint32_t insn, int *is_bl, int32_t *offset)
07b287a0
MS
311{
312 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
313 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
314 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
315 {
0ea6402e 316 *is_bl = (insn >> 31) & 0x1;
07b287a0
MS
317 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
318
319 if (aarch64_debug)
b277c936
PL
320 {
321 debug_printf ("decode: 0x%s 0x%x %s 0x%s\n",
322 core_addr_to_string_nz (addr), insn,
323 *is_bl ? "bl" : "b",
324 core_addr_to_string_nz (addr + *offset));
325 }
07b287a0
MS
326
327 return 1;
328 }
329 return 0;
330}
331
332/* Decode an opcode if it represents a conditional branch instruction.
333
334 ADDR specifies the address of the opcode.
335 INSN specifies the opcode to test.
336 COND receives the branch condition field from the decoded
337 instruction.
338 OFFSET receives the immediate offset from the decoded instruction.
339
340 Return 1 if the opcodes matches and is decoded, otherwise 0. */
341
342static int
343decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
344{
4d50586d
PL
345 /* b.cond 0101 0100 iiii iiii iiii iiii iii0 cccc */
346 if (decode_masked_match (insn, 0xff000010, 0x54000000))
07b287a0
MS
347 {
348 *cond = (insn >> 0) & 0xf;
349 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
350
351 if (aarch64_debug)
b277c936
PL
352 {
353 debug_printf ("decode: 0x%s 0x%x b<%u> 0x%s\n",
354 core_addr_to_string_nz (addr), insn, *cond,
355 core_addr_to_string_nz (addr + *offset));
356 }
07b287a0
MS
357 return 1;
358 }
359 return 0;
360}
361
362/* Decode an opcode if it represents a branch via register instruction.
363
364 ADDR specifies the address of the opcode.
365 INSN specifies the opcode to test.
0ea6402e 366 IS_BLR receives the 'op' bit from the decoded instruction.
07b287a0
MS
367 RN receives the 'rn' field from the decoded instruction.
368
369 Return 1 if the opcodes matches and is decoded, otherwise 0. */
370
371static int
0ea6402e 372decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr, unsigned *rn)
07b287a0
MS
373{
374 /* 8 4 0 6 2 8 4 0 */
375 /* blr 110101100011111100000000000rrrrr */
376 /* br 110101100001111100000000000rrrrr */
377 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
378 {
0ea6402e 379 *is_blr = (insn >> 21) & 1;
07b287a0
MS
380 *rn = (insn >> 5) & 0x1f;
381
382 if (aarch64_debug)
b277c936
PL
383 {
384 debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
385 core_addr_to_string_nz (addr), insn,
386 *is_blr ? "blr" : "br", *rn);
387 }
07b287a0
MS
388
389 return 1;
390 }
391 return 0;
392}
393
394/* Decode an opcode if it represents a CBZ or CBNZ instruction.
395
396 ADDR specifies the address of the opcode.
397 INSN specifies the opcode to test.
398 IS64 receives the 'sf' field from the decoded instruction.
0ea6402e 399 IS_CBNZ receives the 'op' field from the decoded instruction.
07b287a0
MS
400 RN receives the 'rn' field from the decoded instruction.
401 OFFSET receives the 'imm19' field from the decoded instruction.
402
403 Return 1 if the opcodes matches and is decoded, otherwise 0. */
404
405static int
0ea6402e
PL
406decode_cb (CORE_ADDR addr, uint32_t insn, int *is64, int *is_cbnz,
407 unsigned *rn, int32_t *offset)
07b287a0 408{
669e74e8
PL
409 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
410 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
07b287a0
MS
411 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
412 {
07b287a0
MS
413 *rn = (insn >> 0) & 0x1f;
414 *is64 = (insn >> 31) & 0x1;
0ea6402e 415 *is_cbnz = (insn >> 24) & 0x1;
07b287a0
MS
416 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
417
418 if (aarch64_debug)
b277c936
PL
419 {
420 debug_printf ("decode: 0x%s 0x%x %s 0x%s\n",
421 core_addr_to_string_nz (addr), insn,
422 *is_cbnz ? "cbnz" : "cbz",
423 core_addr_to_string_nz (addr + *offset));
424 }
07b287a0
MS
425 return 1;
426 }
427 return 0;
428}
429
430/* Decode an opcode if it represents a ERET instruction.
431
432 ADDR specifies the address of the opcode.
433 INSN specifies the opcode to test.
434
435 Return 1 if the opcodes matches and is decoded, otherwise 0. */
436
437static int
438decode_eret (CORE_ADDR addr, uint32_t insn)
439{
440 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
441 if (insn == 0xd69f03e0)
442 {
443 if (aarch64_debug)
b277c936
PL
444 {
445 debug_printf ("decode: 0x%s 0x%x eret\n",
446 core_addr_to_string_nz (addr), insn);
447 }
07b287a0
MS
448 return 1;
449 }
450 return 0;
451}
452
453/* Decode an opcode if it represents a MOVZ instruction.
454
455 ADDR specifies the address of the opcode.
456 INSN specifies the opcode to test.
457 RD receives the 'rd' field from the decoded instruction.
458
459 Return 1 if the opcodes matches and is decoded, otherwise 0. */
460
461static int
462decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
463{
464 if (decode_masked_match (insn, 0xff800000, 0x52800000))
465 {
466 *rd = (insn >> 0) & 0x1f;
467
468 if (aarch64_debug)
b277c936
PL
469 {
470 debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
471 core_addr_to_string_nz (addr), insn, *rd);
472 }
07b287a0
MS
473 return 1;
474 }
475 return 0;
476}
477
478/* Decode an opcode if it represents a ORR (shifted register)
479 instruction.
480
481 ADDR specifies the address of the opcode.
482 INSN specifies the opcode to test.
483 RD receives the 'rd' field from the decoded instruction.
484 RN receives the 'rn' field from the decoded instruction.
485 RM receives the 'rm' field from the decoded instruction.
486 IMM receives the 'imm6' field from the decoded instruction.
487
488 Return 1 if the opcodes matches and is decoded, otherwise 0. */
489
490static int
491decode_orr_shifted_register_x (CORE_ADDR addr,
492 uint32_t insn, unsigned *rd, unsigned *rn,
493 unsigned *rm, int32_t *imm)
494{
495 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
496 {
497 *rd = (insn >> 0) & 0x1f;
498 *rn = (insn >> 5) & 0x1f;
499 *rm = (insn >> 16) & 0x1f;
500 *imm = (insn >> 10) & 0x3f;
501
502 if (aarch64_debug)
b277c936
PL
503 {
504 debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
505 core_addr_to_string_nz (addr), insn, *rd, *rn,
506 *rm, *imm);
507 }
07b287a0
MS
508 return 1;
509 }
510 return 0;
511}
512
513/* Decode an opcode if it represents a RET instruction.
514
515 ADDR specifies the address of the opcode.
516 INSN specifies the opcode to test.
517 RN receives the 'rn' field from the decoded instruction.
518
519 Return 1 if the opcodes matches and is decoded, otherwise 0. */
520
521static int
522decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
523{
524 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
525 {
526 *rn = (insn >> 5) & 0x1f;
527 if (aarch64_debug)
b277c936
PL
528 {
529 debug_printf ("decode: 0x%s 0x%x ret x%u\n",
530 core_addr_to_string_nz (addr), insn, *rn);
531 }
07b287a0
MS
532 return 1;
533 }
534 return 0;
535}
536
537/* Decode an opcode if it represents the following instruction:
538 STP rt, rt2, [rn, #imm]
539
540 ADDR specifies the address of the opcode.
541 INSN specifies the opcode to test.
542 RT1 receives the 'rt' field from the decoded instruction.
543 RT2 receives the 'rt2' field from the decoded instruction.
544 RN receives the 'rn' field from the decoded instruction.
545 IMM receives the 'imm' field from the decoded instruction.
546
547 Return 1 if the opcodes matches and is decoded, otherwise 0. */
548
549static int
550decode_stp_offset (CORE_ADDR addr,
551 uint32_t insn,
552 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
553{
554 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
555 {
556 *rt1 = (insn >> 0) & 0x1f;
557 *rn = (insn >> 5) & 0x1f;
558 *rt2 = (insn >> 10) & 0x1f;
559 *imm = extract_signed_bitfield (insn, 7, 15);
560 *imm <<= 3;
561
562 if (aarch64_debug)
b277c936
PL
563 {
564 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
565 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
566 *rn, *imm);
567 }
07b287a0
MS
568 return 1;
569 }
570 return 0;
571}
572
573/* Decode an opcode if it represents the following instruction:
574 STP rt, rt2, [rn, #imm]!
575
576 ADDR specifies the address of the opcode.
577 INSN specifies the opcode to test.
578 RT1 receives the 'rt' field from the decoded instruction.
579 RT2 receives the 'rt2' field from the decoded instruction.
580 RN receives the 'rn' field from the decoded instruction.
581 IMM receives the 'imm' field from the decoded instruction.
582
583 Return 1 if the opcodes matches and is decoded, otherwise 0. */
584
585static int
586decode_stp_offset_wb (CORE_ADDR addr,
587 uint32_t insn,
588 unsigned *rt1, unsigned *rt2, unsigned *rn,
589 int32_t *imm)
590{
591 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
592 {
593 *rt1 = (insn >> 0) & 0x1f;
594 *rn = (insn >> 5) & 0x1f;
595 *rt2 = (insn >> 10) & 0x1f;
596 *imm = extract_signed_bitfield (insn, 7, 15);
597 *imm <<= 3;
598
599 if (aarch64_debug)
b277c936
PL
600 {
601 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
602 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
603 *rn, *imm);
604 }
07b287a0
MS
605 return 1;
606 }
607 return 0;
608}
609
610/* Decode an opcode if it represents the following instruction:
611 STUR rt, [rn, #imm]
612
613 ADDR specifies the address of the opcode.
614 INSN specifies the opcode to test.
615 IS64 receives size field from the decoded instruction.
616 RT receives the 'rt' field from the decoded instruction.
617 RN receives the 'rn' field from the decoded instruction.
618 IMM receives the 'imm' field from the decoded instruction.
619
620 Return 1 if the opcodes matches and is decoded, otherwise 0. */
621
622static int
623decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
624 unsigned *rn, int32_t *imm)
625{
626 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
627 {
628 *is64 = (insn >> 30) & 1;
629 *rt = (insn >> 0) & 0x1f;
630 *rn = (insn >> 5) & 0x1f;
631 *imm = extract_signed_bitfield (insn, 9, 12);
632
633 if (aarch64_debug)
b277c936
PL
634 {
635 debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
636 core_addr_to_string_nz (addr), insn,
637 *is64 ? 'x' : 'w', *rt, *rn, *imm);
638 }
07b287a0
MS
639 return 1;
640 }
641 return 0;
642}
643
669e74e8 644/* Decode an opcode if it represents a TBZ or TBNZ instruction.
07b287a0
MS
645
646 ADDR specifies the address of the opcode.
647 INSN specifies the opcode to test.
0ea6402e 648 IS_TBNZ receives the 'op' field from the decoded instruction.
07b287a0
MS
649 BIT receives the bit position field from the decoded instruction.
650 RT receives 'rt' field from the decoded instruction.
651 IMM receives 'imm' field from the decoded instruction.
652
653 Return 1 if the opcodes matches and is decoded, otherwise 0. */
654
655static int
0ea6402e
PL
656decode_tb (CORE_ADDR addr, uint32_t insn, int *is_tbnz, unsigned *bit,
657 unsigned *rt, int32_t *imm)
07b287a0 658{
669e74e8
PL
659 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
660 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
07b287a0
MS
661 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
662 {
07b287a0 663 *rt = (insn >> 0) & 0x1f;
0ea6402e 664 *is_tbnz = (insn >> 24) & 0x1;
07b287a0
MS
665 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
666 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
667
668 if (aarch64_debug)
b277c936
PL
669 {
670 debug_printf ("decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
671 core_addr_to_string_nz (addr), insn,
672 *is_tbnz ? "tbnz" : "tbz", *rt, *bit,
673 core_addr_to_string_nz (addr + *imm));
674 }
07b287a0
MS
675 return 1;
676 }
677 return 0;
678}
679
680/* Analyze a prologue, looking for a recognizable stack frame
681 and frame pointer. Scan until we encounter a store that could
682 clobber the stack frame unexpectedly, or an unknown instruction. */
683
684static CORE_ADDR
685aarch64_analyze_prologue (struct gdbarch *gdbarch,
686 CORE_ADDR start, CORE_ADDR limit,
687 struct aarch64_prologue_cache *cache)
688{
689 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
690 int i;
691 pv_t regs[AARCH64_X_REGISTER_COUNT];
692 struct pv_area *stack;
693 struct cleanup *back_to;
694
695 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
696 regs[i] = pv_register (i, 0);
697 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
698 back_to = make_cleanup_free_pv_area (stack);
699
700 for (; start < limit; start += 4)
701 {
702 uint32_t insn;
703 unsigned rd;
704 unsigned rn;
705 unsigned rm;
706 unsigned rt;
707 unsigned rt1;
708 unsigned rt2;
709 int op_is_sub;
710 int32_t imm;
711 unsigned cond;
96b32e50 712 int is64;
0ea6402e
PL
713 int is_link;
714 int is_cbnz;
715 int is_tbnz;
07b287a0
MS
716 unsigned bit;
717 int32_t offset;
718
719 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
720
721 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
722 regs[rd] = pv_add_constant (regs[rn], imm);
723 else if (decode_adrp (start, insn, &rd))
724 regs[rd] = pv_unknown ();
725 else if (decode_b (start, insn, &is_link, &offset))
726 {
727 /* Stop analysis on branch. */
728 break;
729 }
730 else if (decode_bcond (start, insn, &cond, &offset))
731 {
732 /* Stop analysis on branch. */
733 break;
734 }
735 else if (decode_br (start, insn, &is_link, &rn))
736 {
737 /* Stop analysis on branch. */
738 break;
739 }
0ea6402e 740 else if (decode_cb (start, insn, &is64, &is_cbnz, &rn, &offset))
07b287a0
MS
741 {
742 /* Stop analysis on branch. */
743 break;
744 }
745 else if (decode_eret (start, insn))
746 {
747 /* Stop analysis on branch. */
748 break;
749 }
750 else if (decode_movz (start, insn, &rd))
751 regs[rd] = pv_unknown ();
752 else
753 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
754 {
755 if (imm == 0 && rn == 31)
756 regs[rd] = regs[rm];
757 else
758 {
759 if (aarch64_debug)
b277c936
PL
760 {
761 debug_printf ("aarch64: prologue analysis gave up "
762 "addr=0x%s opcode=0x%x (orr x register)\n",
763 core_addr_to_string_nz (start), insn);
764 }
07b287a0
MS
765 break;
766 }
767 }
768 else if (decode_ret (start, insn, &rn))
769 {
770 /* Stop analysis on branch. */
771 break;
772 }
773 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
774 {
775 pv_area_store (stack, pv_add_constant (regs[rn], offset),
776 is64 ? 8 : 4, regs[rt]);
777 }
778 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
779 {
780 /* If recording this store would invalidate the store area
781 (perhaps because rn is not known) then we should abandon
782 further prologue analysis. */
783 if (pv_area_store_would_trash (stack,
784 pv_add_constant (regs[rn], imm)))
785 break;
786
787 if (pv_area_store_would_trash (stack,
788 pv_add_constant (regs[rn], imm + 8)))
789 break;
790
791 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
792 regs[rt1]);
793 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
794 regs[rt2]);
795 }
796 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
797 {
798 /* If recording this store would invalidate the store area
799 (perhaps because rn is not known) then we should abandon
800 further prologue analysis. */
801 if (pv_area_store_would_trash (stack,
14ac654f
MS
802 pv_add_constant (regs[rn], imm)))
803 break;
804
805 if (pv_area_store_would_trash (stack,
07b287a0
MS
806 pv_add_constant (regs[rn], imm + 8)))
807 break;
808
809 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
810 regs[rt1]);
811 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
812 regs[rt2]);
813 regs[rn] = pv_add_constant (regs[rn], imm);
814 }
0ea6402e 815 else if (decode_tb (start, insn, &is_tbnz, &bit, &rn, &offset))
07b287a0
MS
816 {
817 /* Stop analysis on branch. */
818 break;
819 }
820 else
821 {
822 if (aarch64_debug)
b277c936
PL
823 {
824 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
825 " opcode=0x%x\n",
826 core_addr_to_string_nz (start), insn);
827 }
07b287a0
MS
828 break;
829 }
830 }
831
832 if (cache == NULL)
833 {
834 do_cleanups (back_to);
835 return start;
836 }
837
838 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
839 {
840 /* Frame pointer is fp. Frame size is constant. */
841 cache->framereg = AARCH64_FP_REGNUM;
842 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
843 }
844 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
845 {
846 /* Try the stack pointer. */
847 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
848 cache->framereg = AARCH64_SP_REGNUM;
849 }
850 else
851 {
852 /* We're just out of luck. We don't know where the frame is. */
853 cache->framereg = -1;
854 cache->framesize = 0;
855 }
856
857 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
858 {
859 CORE_ADDR offset;
860
861 if (pv_area_find_reg (stack, gdbarch, i, &offset))
862 cache->saved_regs[i].addr = offset;
863 }
864
865 do_cleanups (back_to);
866 return start;
867}
868
869/* Implement the "skip_prologue" gdbarch method. */
870
871static CORE_ADDR
872aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
873{
874 unsigned long inst;
875 CORE_ADDR skip_pc;
876 CORE_ADDR func_addr, limit_pc;
877 struct symtab_and_line sal;
878
879 /* See if we can determine the end of the prologue via the symbol
880 table. If so, then return either PC, or the PC after the
881 prologue, whichever is greater. */
882 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
883 {
884 CORE_ADDR post_prologue_pc
885 = skip_prologue_using_sal (gdbarch, func_addr);
886
887 if (post_prologue_pc != 0)
888 return max (pc, post_prologue_pc);
889 }
890
891 /* Can't determine prologue from the symbol table, need to examine
892 instructions. */
893
894 /* Find an upper limit on the function prologue using the debug
895 information. If the debug information could not be used to
896 provide that bound, then use an arbitrary large number as the
897 upper bound. */
898 limit_pc = skip_prologue_using_sal (gdbarch, pc);
899 if (limit_pc == 0)
900 limit_pc = pc + 128; /* Magic. */
901
902 /* Try disassembling prologue. */
903 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
904}
905
906/* Scan the function prologue for THIS_FRAME and populate the prologue
907 cache CACHE. */
908
909static void
910aarch64_scan_prologue (struct frame_info *this_frame,
911 struct aarch64_prologue_cache *cache)
912{
913 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
914 CORE_ADDR prologue_start;
915 CORE_ADDR prologue_end;
916 CORE_ADDR prev_pc = get_frame_pc (this_frame);
917 struct gdbarch *gdbarch = get_frame_arch (this_frame);
918
db634143
PL
919 cache->prev_pc = prev_pc;
920
07b287a0
MS
921 /* Assume we do not find a frame. */
922 cache->framereg = -1;
923 cache->framesize = 0;
924
925 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
926 &prologue_end))
927 {
928 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
929
930 if (sal.line == 0)
931 {
932 /* No line info so use the current PC. */
933 prologue_end = prev_pc;
934 }
935 else if (sal.end < prologue_end)
936 {
937 /* The next line begins after the function end. */
938 prologue_end = sal.end;
939 }
940
941 prologue_end = min (prologue_end, prev_pc);
942 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
943 }
944 else
945 {
946 CORE_ADDR frame_loc;
947 LONGEST saved_fp;
948 LONGEST saved_lr;
949 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
950
951 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
952 if (frame_loc == 0)
953 return;
954
955 cache->framereg = AARCH64_FP_REGNUM;
956 cache->framesize = 16;
957 cache->saved_regs[29].addr = 0;
958 cache->saved_regs[30].addr = 8;
959 }
960}
961
7dfa3edc
PL
962/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
963 function may throw an exception if the inferior's registers or memory is
964 not available. */
07b287a0 965
7dfa3edc
PL
966static void
967aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
968 struct aarch64_prologue_cache *cache)
07b287a0 969{
07b287a0
MS
970 CORE_ADDR unwound_fp;
971 int reg;
972
07b287a0
MS
973 aarch64_scan_prologue (this_frame, cache);
974
975 if (cache->framereg == -1)
7dfa3edc 976 return;
07b287a0
MS
977
978 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
979 if (unwound_fp == 0)
7dfa3edc 980 return;
07b287a0
MS
981
982 cache->prev_sp = unwound_fp + cache->framesize;
983
984 /* Calculate actual addresses of saved registers using offsets
985 determined by aarch64_analyze_prologue. */
986 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
987 if (trad_frame_addr_p (cache->saved_regs, reg))
988 cache->saved_regs[reg].addr += cache->prev_sp;
989
db634143
PL
990 cache->func = get_frame_func (this_frame);
991
7dfa3edc
PL
992 cache->available_p = 1;
993}
994
995/* Allocate and fill in *THIS_CACHE with information about the prologue of
996 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
997 Return a pointer to the current aarch64_prologue_cache in
998 *THIS_CACHE. */
999
1000static struct aarch64_prologue_cache *
1001aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
1002{
1003 struct aarch64_prologue_cache *cache;
1004
1005 if (*this_cache != NULL)
1006 return *this_cache;
1007
1008 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1009 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1010 *this_cache = cache;
1011
1012 TRY
1013 {
1014 aarch64_make_prologue_cache_1 (this_frame, cache);
1015 }
1016 CATCH (ex, RETURN_MASK_ERROR)
1017 {
1018 if (ex.error != NOT_AVAILABLE_ERROR)
1019 throw_exception (ex);
1020 }
1021 END_CATCH
1022
07b287a0
MS
1023 return cache;
1024}
1025
7dfa3edc
PL
1026/* Implement the "stop_reason" frame_unwind method. */
1027
1028static enum unwind_stop_reason
1029aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1030 void **this_cache)
1031{
1032 struct aarch64_prologue_cache *cache
1033 = aarch64_make_prologue_cache (this_frame, this_cache);
1034
1035 if (!cache->available_p)
1036 return UNWIND_UNAVAILABLE;
1037
1038 /* Halt the backtrace at "_start". */
1039 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1040 return UNWIND_OUTERMOST;
1041
1042 /* We've hit a wall, stop. */
1043 if (cache->prev_sp == 0)
1044 return UNWIND_OUTERMOST;
1045
1046 return UNWIND_NO_REASON;
1047}
1048
07b287a0
MS
1049/* Our frame ID for a normal frame is the current function's starting
1050 PC and the caller's SP when we were called. */
1051
1052static void
1053aarch64_prologue_this_id (struct frame_info *this_frame,
1054 void **this_cache, struct frame_id *this_id)
1055{
7c8edfae
PL
1056 struct aarch64_prologue_cache *cache
1057 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1058
7dfa3edc
PL
1059 if (!cache->available_p)
1060 *this_id = frame_id_build_unavailable_stack (cache->func);
1061 else
1062 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1063}
1064
1065/* Implement the "prev_register" frame_unwind method. */
1066
1067static struct value *
1068aarch64_prologue_prev_register (struct frame_info *this_frame,
1069 void **this_cache, int prev_regnum)
1070{
1071 struct gdbarch *gdbarch = get_frame_arch (this_frame);
7c8edfae
PL
1072 struct aarch64_prologue_cache *cache
1073 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1074
1075 /* If we are asked to unwind the PC, then we need to return the LR
1076 instead. The prologue may save PC, but it will point into this
1077 frame's prologue, not the next frame's resume location. */
1078 if (prev_regnum == AARCH64_PC_REGNUM)
1079 {
1080 CORE_ADDR lr;
1081
1082 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1083 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1084 }
1085
1086 /* SP is generally not saved to the stack, but this frame is
1087 identified by the next frame's stack pointer at the time of the
1088 call. The value was already reconstructed into PREV_SP. */
1089 /*
1090 +----------+ ^
1091 | saved lr | |
1092 +->| saved fp |--+
1093 | | |
1094 | | | <- Previous SP
1095 | +----------+
1096 | | saved lr |
1097 +--| saved fp |<- FP
1098 | |
1099 | |<- SP
1100 +----------+ */
1101 if (prev_regnum == AARCH64_SP_REGNUM)
1102 return frame_unwind_got_constant (this_frame, prev_regnum,
1103 cache->prev_sp);
1104
1105 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1106 prev_regnum);
1107}
1108
/* AArch64 prologue unwinder.  Identifies ordinary frames (via
   default_frame_sniffer) and unwinds them using the prologue-scan
   cache built by aarch64_make_prologue_cache.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1119
8b61f75d
PL
/* Allocate and fill in *THIS_CACHE with information about a stub
   frame of *THIS_FRAME (see aarch64_stub_unwind_sniffer).  Do not do
   this if *THIS_CACHE was already allocated.  Return a pointer to the
   current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  /* Reuse the cache if it has already been built for this frame.  */
  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* A stub has no prologue to scan; the previous SP is simply this
     frame's SP.  Registers may be unavailable (e.g. in a core file);
     in that case leave available_p clear instead of erroring out.  */
  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1153
02a2a705
PL
1154/* Implement the "stop_reason" frame_unwind method. */
1155
1156static enum unwind_stop_reason
1157aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1158 void **this_cache)
1159{
1160 struct aarch64_prologue_cache *cache
1161 = aarch64_make_stub_cache (this_frame, this_cache);
1162
1163 if (!cache->available_p)
1164 return UNWIND_UNAVAILABLE;
1165
1166 return UNWIND_NO_REASON;
1167}
1168
07b287a0
MS
1169/* Our frame ID for a stub frame is the current SP and LR. */
1170
1171static void
1172aarch64_stub_this_id (struct frame_info *this_frame,
1173 void **this_cache, struct frame_id *this_id)
1174{
8b61f75d
PL
1175 struct aarch64_prologue_cache *cache
1176 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1177
02a2a705
PL
1178 if (cache->available_p)
1179 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1180 else
1181 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1182}
1183
1184/* Implement the "sniffer" frame_unwind method. */
1185
1186static int
1187aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1188 struct frame_info *this_frame,
1189 void **this_prologue_cache)
1190{
1191 CORE_ADDR addr_in_block;
1192 gdb_byte dummy[4];
1193
1194 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1195 if (in_plt_section (addr_in_block)
07b287a0
MS
1196 /* We also use the stub winder if the target memory is unreadable
1197 to avoid having the prologue unwinder trying to read it. */
1198 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1199 return 1;
1200
1201 return 0;
1202}
1203
/* AArch64 stub unwinder.  Handles frames for which no prologue can be
   analyzed: PLT entries and code whose memory is unreadable (see
   aarch64_stub_unwind_sniffer).  Register recovery reuses
   aarch64_prologue_prev_register.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
1214
1215/* Return the frame base address of *THIS_FRAME. */
1216
1217static CORE_ADDR
1218aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1219{
7c8edfae
PL
1220 struct aarch64_prologue_cache *cache
1221 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1222
1223 return cache->prev_sp - cache->framesize;
1224}
1225
/* AArch64 default frame base information.  The same function supplies
   all three base addresses (frame, locals and arguments).  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
1234
1235/* Assuming THIS_FRAME is a dummy, return the frame ID of that
1236 dummy frame. The frame ID's base needs to match the TOS value
1237 saved by save_dummy_frame_tos () and returned from
1238 aarch64_push_dummy_call, and the PC needs to match the dummy
1239 frame's breakpoint. */
1240
1241static struct frame_id
1242aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1243{
1244 return frame_id_build (get_frame_register_unsigned (this_frame,
1245 AARCH64_SP_REGNUM),
1246 get_frame_pc (this_frame));
1247}
1248
1249/* Implement the "unwind_pc" gdbarch method. */
1250
1251static CORE_ADDR
1252aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1253{
1254 CORE_ADDR pc
1255 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1256
1257 return pc;
1258}
1259
1260/* Implement the "unwind_sp" gdbarch method. */
1261
1262static CORE_ADDR
1263aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1264{
1265 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1266}
1267
1268/* Return the value of the REGNUM register in the previous frame of
1269 *THIS_FRAME. */
1270
1271static struct value *
1272aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1273 void **this_cache, int regnum)
1274{
1275 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1276 CORE_ADDR lr;
1277
1278 switch (regnum)
1279 {
1280 case AARCH64_PC_REGNUM:
1281 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1282 return frame_unwind_got_constant (this_frame, regnum, lr);
1283
1284 default:
1285 internal_error (__FILE__, __LINE__,
1286 _("Unexpected register %d"), regnum);
1287 }
1288}
1289
1290/* Implement the "init_reg" dwarf2_frame_ops method. */
1291
1292static void
1293aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1294 struct dwarf2_frame_state_reg *reg,
1295 struct frame_info *this_frame)
1296{
1297 switch (regnum)
1298 {
1299 case AARCH64_PC_REGNUM:
1300 reg->how = DWARF2_FRAME_REG_FN;
1301 reg->loc.fn = aarch64_dwarf2_prev_register;
1302 break;
1303 case AARCH64_SP_REGNUM:
1304 reg->how = DWARF2_FRAME_REG_CFA;
1305 break;
1306 }
1307}
1308
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  Points at the caller's buffer; the bytes
     are not copied here, only when the stack is finally written.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1322
1323/* Return the alignment (in bytes) of the given type. */
1324
1325static int
1326aarch64_type_align (struct type *t)
1327{
1328 int n;
1329 int align;
1330 int falign;
1331
1332 t = check_typedef (t);
1333 switch (TYPE_CODE (t))
1334 {
1335 default:
1336 /* Should never happen. */
1337 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1338 return 4;
1339
1340 case TYPE_CODE_PTR:
1341 case TYPE_CODE_ENUM:
1342 case TYPE_CODE_INT:
1343 case TYPE_CODE_FLT:
1344 case TYPE_CODE_SET:
1345 case TYPE_CODE_RANGE:
1346 case TYPE_CODE_BITSTRING:
1347 case TYPE_CODE_REF:
1348 case TYPE_CODE_CHAR:
1349 case TYPE_CODE_BOOL:
1350 return TYPE_LENGTH (t);
1351
1352 case TYPE_CODE_ARRAY:
1353 case TYPE_CODE_COMPLEX:
1354 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1355
1356 case TYPE_CODE_STRUCT:
1357 case TYPE_CODE_UNION:
1358 align = 1;
1359 for (n = 0; n < TYPE_NFIELDS (t); n++)
1360 {
1361 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1362 if (falign > align)
1363 align = falign;
1364 }
1365 return align;
1366 }
1367}
1368
1369/* Return 1 if *TY is a homogeneous floating-point aggregate as
1370 defined in the AAPCS64 ABI document; otherwise return 0. */
1371
1372static int
1373is_hfa (struct type *ty)
1374{
1375 switch (TYPE_CODE (ty))
1376 {
1377 case TYPE_CODE_ARRAY:
1378 {
1379 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1380 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1381 return 1;
1382 break;
1383 }
1384
1385 case TYPE_CODE_UNION:
1386 case TYPE_CODE_STRUCT:
1387 {
1388 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1389 {
1390 struct type *member0_type;
1391
1392 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1393 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1394 {
1395 int i;
1396
1397 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1398 {
1399 struct type *member1_type;
1400
1401 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1402 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1403 || (TYPE_LENGTH (member0_type)
1404 != TYPE_LENGTH (member1_type)))
1405 return 0;
1406 }
1407 return 1;
1408 }
1409 }
1410 return 0;
1411 }
1412
1413 default:
1414 break;
1415 }
1416
1417 return 0;
1418}
1419
/* AArch64 function call information structure.  Tracks argument
   marshalling state while setting up a dummy call.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  Items are written to memory in reverse order
     once all arguments have been processed.  */
  VEC(stack_item_t) *si;
};
1441
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  /* First X register to use.  INFO->ngrn itself is advanced by the
     caller (see pass_in_x_or_stack), not here.  */
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  /* Copy the value one X-register-sized chunk at a time.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1482
1483/* Attempt to marshall a value in a V register. Return 1 if
1484 successful, or 0 if insufficient registers are available. This
1485 function, unlike the equivalent pass_in_x() function does not
1486 handle arguments spread across multiple registers. */
1487
1488static int
1489pass_in_v (struct gdbarch *gdbarch,
1490 struct regcache *regcache,
1491 struct aarch64_call_info *info,
1492 const bfd_byte *buf)
1493{
1494 if (info->nsrn < 8)
1495 {
1496 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1497 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1498
1499 info->argnum++;
1500 info->nsrn++;
1501
1502 regcache_cooked_write (regcache, regnum, buf);
1503 if (aarch64_debug)
b277c936
PL
1504 {
1505 debug_printf ("arg %d in %s\n", info->argnum,
1506 gdbarch_register_name (gdbarch, regnum));
1507 }
07b287a0
MS
1508 return 1;
1509 }
1510 info->nsrn = 8;
1511 return 0;
1512}
1513
/* Marshall an argument onto the stack.  Records the value (and any
   required alignment padding) in INFO->si; the actual memory write
   happens later, when the caller drains the vector.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  The padding item reuses BUF as
	 its data pointer; the padding bytes' contents are
	 irrelevant.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1559
1560/* Marshall an argument into a sequence of one or more consecutive X
1561 registers or, if insufficient X registers are available then onto
1562 the stack. */
1563
1564static void
1565pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1566 struct aarch64_call_info *info, struct type *type,
1567 const bfd_byte *buf)
1568{
1569 int len = TYPE_LENGTH (type);
1570 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1571
1572 /* PCS C.13 - Pass in registers if we have enough spare */
1573 if (info->ngrn + nregs <= 8)
1574 {
1575 pass_in_x (gdbarch, regcache, info, type, buf);
1576 info->ngrn += nregs;
1577 }
1578 else
1579 {
1580 info->ngrn = 8;
1581 pass_on_stack (info, type, buf);
1582 }
1583}
1584
1585/* Pass a value in a V register, or on the stack if insufficient are
1586 available. */
1587
1588static void
1589pass_in_v_or_stack (struct gdbarch *gdbarch,
1590 struct regcache *regcache,
1591 struct aarch64_call_info *info,
1592 struct type *type,
1593 const bfd_byte *buf)
1594{
1595 if (!pass_in_v (gdbarch, regcache, info, buf))
1596 pass_on_stack (info, type, buf);
1597}
1598
1599/* Implement the "push_dummy_call" gdbarch method. */
1600
1601static CORE_ADDR
1602aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1603 struct regcache *regcache, CORE_ADDR bp_addr,
1604 int nargs,
1605 struct value **args, CORE_ADDR sp, int struct_return,
1606 CORE_ADDR struct_addr)
1607{
1608 int nstack = 0;
1609 int argnum;
1610 int x_argreg;
1611 int v_argreg;
1612 struct aarch64_call_info info;
1613 struct type *func_type;
1614 struct type *return_type;
1615 int lang_struct_return;
1616
1617 memset (&info, 0, sizeof (info));
1618
1619 /* We need to know what the type of the called function is in order
1620 to determine the number of named/anonymous arguments for the
1621 actual argument placement, and the return type in order to handle
1622 return value correctly.
1623
1624 The generic code above us views the decision of return in memory
1625 or return in registers as a two stage processes. The language
1626 handler is consulted first and may decide to return in memory (eg
1627 class with copy constructor returned by value), this will cause
1628 the generic code to allocate space AND insert an initial leading
1629 argument.
1630
1631 If the language code does not decide to pass in memory then the
1632 target code is consulted.
1633
1634 If the language code decides to pass in memory we want to move
1635 the pointer inserted as the initial argument from the argument
1636 list and into X8, the conventional AArch64 struct return pointer
1637 register.
1638
1639 This is slightly awkward, ideally the flag "lang_struct_return"
1640 would be passed to the targets implementation of push_dummy_call.
1641 Rather that change the target interface we call the language code
1642 directly ourselves. */
1643
1644 func_type = check_typedef (value_type (function));
1645
1646 /* Dereference function pointer types. */
1647 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1648 func_type = TYPE_TARGET_TYPE (func_type);
1649
1650 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1651 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1652
1653 /* If language_pass_by_reference () returned true we will have been
1654 given an additional initial argument, a hidden pointer to the
1655 return slot in memory. */
1656 return_type = TYPE_TARGET_TYPE (func_type);
1657 lang_struct_return = language_pass_by_reference (return_type);
1658
1659 /* Set the return address. For the AArch64, the return breakpoint
1660 is always at BP_ADDR. */
1661 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1662
1663 /* If we were given an initial argument for the return slot because
1664 lang_struct_return was true, lose it. */
1665 if (lang_struct_return)
1666 {
1667 args++;
1668 nargs--;
1669 }
1670
1671 /* The struct_return pointer occupies X8. */
1672 if (struct_return || lang_struct_return)
1673 {
1674 if (aarch64_debug)
b277c936
PL
1675 {
1676 debug_printf ("struct return in %s = 0x%s\n",
1677 gdbarch_register_name (gdbarch,
1678 AARCH64_STRUCT_RETURN_REGNUM),
1679 paddress (gdbarch, struct_addr));
1680 }
07b287a0
MS
1681 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1682 struct_addr);
1683 }
1684
1685 for (argnum = 0; argnum < nargs; argnum++)
1686 {
1687 struct value *arg = args[argnum];
1688 struct type *arg_type;
1689 int len;
1690
1691 arg_type = check_typedef (value_type (arg));
1692 len = TYPE_LENGTH (arg_type);
1693
1694 switch (TYPE_CODE (arg_type))
1695 {
1696 case TYPE_CODE_INT:
1697 case TYPE_CODE_BOOL:
1698 case TYPE_CODE_CHAR:
1699 case TYPE_CODE_RANGE:
1700 case TYPE_CODE_ENUM:
1701 if (len < 4)
1702 {
1703 /* Promote to 32 bit integer. */
1704 if (TYPE_UNSIGNED (arg_type))
1705 arg_type = builtin_type (gdbarch)->builtin_uint32;
1706 else
1707 arg_type = builtin_type (gdbarch)->builtin_int32;
1708 arg = value_cast (arg_type, arg);
1709 }
1710 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1711 value_contents (arg));
1712 break;
1713
1714 case TYPE_CODE_COMPLEX:
1715 if (info.nsrn <= 6)
1716 {
1717 const bfd_byte *buf = value_contents (arg);
1718 struct type *target_type =
1719 check_typedef (TYPE_TARGET_TYPE (arg_type));
1720
1721 pass_in_v (gdbarch, regcache, &info, buf);
1722 pass_in_v (gdbarch, regcache, &info,
1723 buf + TYPE_LENGTH (target_type));
1724 }
1725 else
1726 {
1727 info.nsrn = 8;
1728 pass_on_stack (&info, arg_type, value_contents (arg));
1729 }
1730 break;
1731 case TYPE_CODE_FLT:
1732 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1733 value_contents (arg));
1734 break;
1735
1736 case TYPE_CODE_STRUCT:
1737 case TYPE_CODE_ARRAY:
1738 case TYPE_CODE_UNION:
1739 if (is_hfa (arg_type))
1740 {
1741 int elements = TYPE_NFIELDS (arg_type);
1742
1743 /* Homogeneous Aggregates */
1744 if (info.nsrn + elements < 8)
1745 {
1746 int i;
1747
1748 for (i = 0; i < elements; i++)
1749 {
1750 /* We know that we have sufficient registers
1751 available therefore this will never fallback
1752 to the stack. */
1753 struct value *field =
1754 value_primitive_field (arg, 0, i, arg_type);
1755 struct type *field_type =
1756 check_typedef (value_type (field));
1757
1758 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1759 value_contents_writeable (field));
1760 }
1761 }
1762 else
1763 {
1764 info.nsrn = 8;
1765 pass_on_stack (&info, arg_type, value_contents (arg));
1766 }
1767 }
1768 else if (len > 16)
1769 {
1770 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1771 invisible reference. */
1772
1773 /* Allocate aligned storage. */
1774 sp = align_down (sp - len, 16);
1775
1776 /* Write the real data into the stack. */
1777 write_memory (sp, value_contents (arg), len);
1778
1779 /* Construct the indirection. */
1780 arg_type = lookup_pointer_type (arg_type);
1781 arg = value_from_pointer (arg_type, sp);
1782 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1783 value_contents (arg));
1784 }
1785 else
1786 /* PCS C.15 / C.18 multiple values pass. */
1787 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1788 value_contents (arg));
1789 break;
1790
1791 default:
1792 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1793 value_contents (arg));
1794 break;
1795 }
1796 }
1797
1798 /* Make sure stack retains 16 byte alignment. */
1799 if (info.nsaa & 15)
1800 sp -= 16 - (info.nsaa & 15);
1801
1802 while (!VEC_empty (stack_item_t, info.si))
1803 {
1804 stack_item_t *si = VEC_last (stack_item_t, info.si);
1805
1806 sp -= si->len;
1807 write_memory (sp, si->data, si->len);
1808 VEC_pop (stack_item_t, info.si);
1809 }
1810
1811 VEC_free (stack_item_t, info.si);
1812
1813 /* Finally, update the SP register. */
1814 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1815
1816 return sp;
1817}
1818
1819/* Implement the "frame_align" gdbarch method. */
1820
1821static CORE_ADDR
1822aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1823{
1824 /* Align the stack to sixteen bytes. */
1825 return sp & ~(CORE_ADDR) 15;
1826}
1827
1828/* Return the type for an AdvSISD Q register. */
1829
1830static struct type *
1831aarch64_vnq_type (struct gdbarch *gdbarch)
1832{
1833 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1834
1835 if (tdep->vnq_type == NULL)
1836 {
1837 struct type *t;
1838 struct type *elem;
1839
1840 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1841 TYPE_CODE_UNION);
1842
1843 elem = builtin_type (gdbarch)->builtin_uint128;
1844 append_composite_type_field (t, "u", elem);
1845
1846 elem = builtin_type (gdbarch)->builtin_int128;
1847 append_composite_type_field (t, "s", elem);
1848
1849 tdep->vnq_type = t;
1850 }
1851
1852 return tdep->vnq_type;
1853}
1854
1855/* Return the type for an AdvSISD D register. */
1856
1857static struct type *
1858aarch64_vnd_type (struct gdbarch *gdbarch)
1859{
1860 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1861
1862 if (tdep->vnd_type == NULL)
1863 {
1864 struct type *t;
1865 struct type *elem;
1866
1867 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1868 TYPE_CODE_UNION);
1869
1870 elem = builtin_type (gdbarch)->builtin_double;
1871 append_composite_type_field (t, "f", elem);
1872
1873 elem = builtin_type (gdbarch)->builtin_uint64;
1874 append_composite_type_field (t, "u", elem);
1875
1876 elem = builtin_type (gdbarch)->builtin_int64;
1877 append_composite_type_field (t, "s", elem);
1878
1879 tdep->vnd_type = t;
1880 }
1881
1882 return tdep->vnd_type;
1883}
1884
1885/* Return the type for an AdvSISD S register. */
1886
1887static struct type *
1888aarch64_vns_type (struct gdbarch *gdbarch)
1889{
1890 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1891
1892 if (tdep->vns_type == NULL)
1893 {
1894 struct type *t;
1895 struct type *elem;
1896
1897 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1898 TYPE_CODE_UNION);
1899
1900 elem = builtin_type (gdbarch)->builtin_float;
1901 append_composite_type_field (t, "f", elem);
1902
1903 elem = builtin_type (gdbarch)->builtin_uint32;
1904 append_composite_type_field (t, "u", elem);
1905
1906 elem = builtin_type (gdbarch)->builtin_int32;
1907 append_composite_type_field (t, "s", elem);
1908
1909 tdep->vns_type = t;
1910 }
1911
1912 return tdep->vns_type;
1913}
1914
1915/* Return the type for an AdvSISD H register. */
1916
1917static struct type *
1918aarch64_vnh_type (struct gdbarch *gdbarch)
1919{
1920 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1921
1922 if (tdep->vnh_type == NULL)
1923 {
1924 struct type *t;
1925 struct type *elem;
1926
1927 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1928 TYPE_CODE_UNION);
1929
1930 elem = builtin_type (gdbarch)->builtin_uint16;
1931 append_composite_type_field (t, "u", elem);
1932
1933 elem = builtin_type (gdbarch)->builtin_int16;
1934 append_composite_type_field (t, "s", elem);
1935
1936 tdep->vnh_type = t;
1937 }
1938
1939 return tdep->vnh_type;
1940}
1941
1942/* Return the type for an AdvSISD B register. */
1943
1944static struct type *
1945aarch64_vnb_type (struct gdbarch *gdbarch)
1946{
1947 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1948
1949 if (tdep->vnb_type == NULL)
1950 {
1951 struct type *t;
1952 struct type *elem;
1953
1954 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1955 TYPE_CODE_UNION);
1956
1957 elem = builtin_type (gdbarch)->builtin_uint8;
1958 append_composite_type_field (t, "u", elem);
1959
1960 elem = builtin_type (gdbarch)->builtin_int8;
1961 append_composite_type_field (t, "s", elem);
1962
1963 tdep->vnb_type = t;
1964 }
1965
1966 return tdep->vnb_type;
1967}
1968
1969/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1970
1971static int
1972aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1973{
1974 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1975 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1976
1977 if (reg == AARCH64_DWARF_SP)
1978 return AARCH64_SP_REGNUM;
1979
1980 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1981 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1982
1983 return -1;
1984}
1985\f
1986
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear any symbol information a previous caller may have left in
     INFO before handing off to the opcodes disassembler.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1995
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2000
2001/* Implement the "breakpoint_from_pc" gdbarch method. */
2002
948f8e3d 2003static const gdb_byte *
07b287a0
MS
2004aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
2005 int *lenptr)
2006{
2007 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2008
2009 *lenptr = sizeof (aarch64_default_breakpoint);
2010 return aarch64_default_breakpoint;
2011}
2012
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating point values are returned in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      /* Complex values: real part in V0, imaginary part in V1.  */
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[X_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2114
2115
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.

   The unused locals NRC and CODE have been removed.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
2149
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.

   Mirrors aarch64_extract_return_value: VALBUF holds the value in
   target byte order and is copied out according to the AAPCS64
   return-value convention selected by TYPE:
     - scalar floats go to V0;
     - integer-like scalars (int, char, bool, pointer, reference,
       enum) of one word or less are sign-extended into X0, larger
       integrals fill consecutive X registers from X0;
     - HFAs place one member per V register starting at V0;
     - any other struct/union is copied into consecutive X registers
       as if loaded from word-aligned memory with 64-bit loads.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      /* Copy at most one V register's worth; shorter floats occupy
	 the low bytes of V0.  */
      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* One HFA member per V register, starting at V0.  All members
	 of an HFA share the same type, hence the single LEN.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2244
2245/* Implement the "return_value" gdbarch method. */
2246
2247static enum return_value_convention
2248aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2249 struct type *valtype, struct regcache *regcache,
2250 gdb_byte *readbuf, const gdb_byte *writebuf)
2251{
2252 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2253
2254 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2255 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2256 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2257 {
2258 if (aarch64_return_in_memory (gdbarch, valtype))
2259 {
2260 if (aarch64_debug)
b277c936 2261 debug_printf ("return value in memory\n");
07b287a0
MS
2262 return RETURN_VALUE_STRUCT_CONVENTION;
2263 }
2264 }
2265
2266 if (writebuf)
2267 aarch64_store_return_value (valtype, regcache, writebuf);
2268
2269 if (readbuf)
2270 aarch64_extract_return_value (valtype, regcache, readbuf);
2271
2272 if (aarch64_debug)
b277c936 2273 debug_printf ("return value in registers\n");
07b287a0
MS
2274
2275 return RETURN_VALUE_REGISTER_CONVENTION;
2276}
2277
2278/* Implement the "get_longjmp_target" gdbarch method. */
2279
2280static int
2281aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2282{
2283 CORE_ADDR jb_addr;
2284 gdb_byte buf[X_REGISTER_SIZE];
2285 struct gdbarch *gdbarch = get_frame_arch (frame);
2286 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2287 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2288
2289 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2290
2291 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2292 X_REGISTER_SIZE))
2293 return 0;
2294
2295 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2296 return 1;
2297}
ea873d8e
PL
2298
2299/* Implement the "gen_return_address" gdbarch method. */
2300
2301static void
2302aarch64_gen_return_address (struct gdbarch *gdbarch,
2303 struct agent_expr *ax, struct axs_value *value,
2304 CORE_ADDR scope)
2305{
2306 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2307 value->kind = axs_lvalue_register;
2308 value->u.reg = AARCH64_LR_REGNUM;
2309}
07b287a0
MS
2310\f
2311
2312/* Return the pseudo register name corresponding to register regnum. */
2313
2314static const char *
2315aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2316{
2317 static const char *const q_name[] =
2318 {
2319 "q0", "q1", "q2", "q3",
2320 "q4", "q5", "q6", "q7",
2321 "q8", "q9", "q10", "q11",
2322 "q12", "q13", "q14", "q15",
2323 "q16", "q17", "q18", "q19",
2324 "q20", "q21", "q22", "q23",
2325 "q24", "q25", "q26", "q27",
2326 "q28", "q29", "q30", "q31",
2327 };
2328
2329 static const char *const d_name[] =
2330 {
2331 "d0", "d1", "d2", "d3",
2332 "d4", "d5", "d6", "d7",
2333 "d8", "d9", "d10", "d11",
2334 "d12", "d13", "d14", "d15",
2335 "d16", "d17", "d18", "d19",
2336 "d20", "d21", "d22", "d23",
2337 "d24", "d25", "d26", "d27",
2338 "d28", "d29", "d30", "d31",
2339 };
2340
2341 static const char *const s_name[] =
2342 {
2343 "s0", "s1", "s2", "s3",
2344 "s4", "s5", "s6", "s7",
2345 "s8", "s9", "s10", "s11",
2346 "s12", "s13", "s14", "s15",
2347 "s16", "s17", "s18", "s19",
2348 "s20", "s21", "s22", "s23",
2349 "s24", "s25", "s26", "s27",
2350 "s28", "s29", "s30", "s31",
2351 };
2352
2353 static const char *const h_name[] =
2354 {
2355 "h0", "h1", "h2", "h3",
2356 "h4", "h5", "h6", "h7",
2357 "h8", "h9", "h10", "h11",
2358 "h12", "h13", "h14", "h15",
2359 "h16", "h17", "h18", "h19",
2360 "h20", "h21", "h22", "h23",
2361 "h24", "h25", "h26", "h27",
2362 "h28", "h29", "h30", "h31",
2363 };
2364
2365 static const char *const b_name[] =
2366 {
2367 "b0", "b1", "b2", "b3",
2368 "b4", "b5", "b6", "b7",
2369 "b8", "b9", "b10", "b11",
2370 "b12", "b13", "b14", "b15",
2371 "b16", "b17", "b18", "b19",
2372 "b20", "b21", "b22", "b23",
2373 "b24", "b25", "b26", "b27",
2374 "b28", "b29", "b30", "b31",
2375 };
2376
2377 regnum -= gdbarch_num_regs (gdbarch);
2378
2379 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2380 return q_name[regnum - AARCH64_Q0_REGNUM];
2381
2382 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2383 return d_name[regnum - AARCH64_D0_REGNUM];
2384
2385 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2386 return s_name[regnum - AARCH64_S0_REGNUM];
2387
2388 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2389 return h_name[regnum - AARCH64_H0_REGNUM];
2390
2391 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2392 return b_name[regnum - AARCH64_B0_REGNUM];
2393
2394 internal_error (__FILE__, __LINE__,
2395 _("aarch64_pseudo_register_name: bad register number %d"),
2396 regnum);
2397}
2398
2399/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2400
2401static struct type *
2402aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2403{
2404 regnum -= gdbarch_num_regs (gdbarch);
2405
2406 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2407 return aarch64_vnq_type (gdbarch);
2408
2409 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2410 return aarch64_vnd_type (gdbarch);
2411
2412 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2413 return aarch64_vns_type (gdbarch);
2414
2415 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2416 return aarch64_vnh_type (gdbarch);
2417
2418 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2419 return aarch64_vnb_type (gdbarch);
2420
2421 internal_error (__FILE__, __LINE__,
2422 _("aarch64_pseudo_register_type: bad register number %d"),
2423 regnum);
2424}
2425
2426/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2427
2428static int
2429aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2430 struct reggroup *group)
2431{
2432 regnum -= gdbarch_num_regs (gdbarch);
2433
2434 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2435 return group == all_reggroup || group == vector_reggroup;
2436 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2437 return (group == all_reggroup || group == vector_reggroup
2438 || group == float_reggroup);
2439 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2440 return (group == all_reggroup || group == vector_reggroup
2441 || group == float_reggroup);
2442 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2443 return group == all_reggroup || group == vector_reggroup;
2444 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2445 return group == all_reggroup || group == vector_reggroup;
2446
2447 return group == all_reggroup;
2448}
2449
2450/* Implement the "pseudo_register_read_value" gdbarch method. */
2451
2452static struct value *
2453aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2454 struct regcache *regcache,
2455 int regnum)
2456{
2457 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2458 struct value *result_value;
2459 gdb_byte *buf;
2460
2461 result_value = allocate_value (register_type (gdbarch, regnum));
2462 VALUE_LVAL (result_value) = lval_register;
2463 VALUE_REGNUM (result_value) = regnum;
2464 buf = value_contents_raw (result_value);
2465
2466 regnum -= gdbarch_num_regs (gdbarch);
2467
2468 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2469 {
2470 enum register_status status;
2471 unsigned v_regnum;
2472
2473 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2474 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2475 if (status != REG_VALID)
2476 mark_value_bytes_unavailable (result_value, 0,
2477 TYPE_LENGTH (value_type (result_value)));
2478 else
2479 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2480 return result_value;
2481 }
2482
2483 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2484 {
2485 enum register_status status;
2486 unsigned v_regnum;
2487
2488 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2489 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2490 if (status != REG_VALID)
2491 mark_value_bytes_unavailable (result_value, 0,
2492 TYPE_LENGTH (value_type (result_value)));
2493 else
2494 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2495 return result_value;
2496 }
2497
2498 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2499 {
2500 enum register_status status;
2501 unsigned v_regnum;
2502
2503 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2504 status = regcache_raw_read (regcache, v_regnum, reg_buf);
4bcddace
PL
2505 if (status != REG_VALID)
2506 mark_value_bytes_unavailable (result_value, 0,
2507 TYPE_LENGTH (value_type (result_value)));
2508 else
2509 memcpy (buf, reg_buf, S_REGISTER_SIZE);
07b287a0
MS
2510 return result_value;
2511 }
2512
2513 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2514 {
2515 enum register_status status;
2516 unsigned v_regnum;
2517
2518 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2519 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2520 if (status != REG_VALID)
2521 mark_value_bytes_unavailable (result_value, 0,
2522 TYPE_LENGTH (value_type (result_value)));
2523 else
2524 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2525 return result_value;
2526 }
2527
2528 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2529 {
2530 enum register_status status;
2531 unsigned v_regnum;
2532
2533 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2534 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2535 if (status != REG_VALID)
2536 mark_value_bytes_unavailable (result_value, 0,
2537 TYPE_LENGTH (value_type (result_value)));
2538 else
2539 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2540 return result_value;
2541 }
2542
2543 gdb_assert_not_reached ("regnum out of bound");
2544}
2545
2546/* Implement the "pseudo_register_write" gdbarch method. */
2547
2548static void
2549aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2550 int regnum, const gdb_byte *buf)
2551{
2552 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2553
2554 /* Ensure the register buffer is zero, we want gdb writes of the
2555 various 'scalar' pseudo registers to behavior like architectural
2556 writes, register width bytes are written the remainder are set to
2557 zero. */
2558 memset (reg_buf, 0, sizeof (reg_buf));
2559
2560 regnum -= gdbarch_num_regs (gdbarch);
2561
2562 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2563 {
2564 /* pseudo Q registers */
2565 unsigned v_regnum;
2566
2567 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2568 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2569 regcache_raw_write (regcache, v_regnum, reg_buf);
2570 return;
2571 }
2572
2573 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2574 {
2575 /* pseudo D registers */
2576 unsigned v_regnum;
2577
2578 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2579 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2580 regcache_raw_write (regcache, v_regnum, reg_buf);
2581 return;
2582 }
2583
2584 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2585 {
2586 unsigned v_regnum;
2587
2588 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2589 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2590 regcache_raw_write (regcache, v_regnum, reg_buf);
2591 return;
2592 }
2593
2594 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2595 {
2596 /* pseudo H registers */
2597 unsigned v_regnum;
2598
2599 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2600 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2601 regcache_raw_write (regcache, v_regnum, reg_buf);
2602 return;
2603 }
2604
2605 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2606 {
2607 /* pseudo B registers */
2608 unsigned v_regnum;
2609
2610 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2611 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2612 regcache_raw_write (regcache, v_regnum, reg_buf);
2613 return;
2614 }
2615
2616 gdb_assert_not_reached ("regnum out of bound");
2617}
2618
07b287a0
MS
/* Callback function for user_reg_add.  BATON points at the raw
   register number the user-register alias stands for.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  int regnum = *(const int *) baton;

  return value_of_register (regnum, frame);
}
2628\f
2629
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Planting a breakpoint trap inside a load-exclusive/store-exclusive
   sequence clears the exclusive monitor and the sequence can never
   complete.  Instead, when PC sits on a load-exclusive, scan forward
   (up to a fixed window) for the matching store-exclusive and place
   breakpoints after the sequence, plus at the target of at most one
   conditional branch found inside it.  Returns 1 if breakpoints were
   placed, 0 to fall back to the default stepping behaviour.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch in the window is
	     unsupported; give up and fall back.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2707
07b287a0
MS
2708/* Initialize the current architecture based on INFO. If possible,
2709 re-use an architecture from ARCHES, which is a list of
2710 architectures already created during this debugging session.
2711
2712 Called e.g. at program startup, when reading a core file, and when
2713 reading a binary file. */
2714
2715static struct gdbarch *
2716aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2717{
2718 struct gdbarch_tdep *tdep;
2719 struct gdbarch *gdbarch;
2720 struct gdbarch_list *best_arch;
2721 struct tdesc_arch_data *tdesc_data = NULL;
2722 const struct target_desc *tdesc = info.target_desc;
2723 int i;
2724 int have_fpa_registers = 1;
2725 int valid_p = 1;
2726 const struct tdesc_feature *feature;
2727 int num_regs = 0;
2728 int num_pseudo_regs = 0;
2729
2730 /* Ensure we always have a target descriptor. */
2731 if (!tdesc_has_registers (tdesc))
2732 tdesc = tdesc_aarch64;
2733
2734 gdb_assert (tdesc);
2735
2736 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2737
2738 if (feature == NULL)
2739 return NULL;
2740
2741 tdesc_data = tdesc_data_alloc ();
2742
2743 /* Validate the descriptor provides the mandatory core R registers
2744 and allocate their numbers. */
2745 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2746 valid_p &=
2747 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2748 aarch64_r_register_names[i]);
2749
2750 num_regs = AARCH64_X0_REGNUM + i;
2751
2752 /* Look for the V registers. */
2753 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2754 if (feature)
2755 {
2756 /* Validate the descriptor provides the mandatory V registers
2757 and allocate their numbers. */
2758 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2759 valid_p &=
2760 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2761 aarch64_v_register_names[i]);
2762
2763 num_regs = AARCH64_V0_REGNUM + i;
2764
2765 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2766 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2767 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2768 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2769 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2770 }
2771
2772 if (!valid_p)
2773 {
2774 tdesc_data_cleanup (tdesc_data);
2775 return NULL;
2776 }
2777
2778 /* AArch64 code is always little-endian. */
2779 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2780
2781 /* If there is already a candidate, use it. */
2782 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2783 best_arch != NULL;
2784 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2785 {
2786 /* Found a match. */
2787 break;
2788 }
2789
2790 if (best_arch != NULL)
2791 {
2792 if (tdesc_data != NULL)
2793 tdesc_data_cleanup (tdesc_data);
2794 return best_arch->gdbarch;
2795 }
2796
8d749320 2797 tdep = XCNEW (struct gdbarch_tdep);
07b287a0
MS
2798 gdbarch = gdbarch_alloc (&info, tdep);
2799
2800 /* This should be low enough for everything. */
2801 tdep->lowest_pc = 0x20;
2802 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2803 tdep->jb_elt_size = 8;
2804
2805 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2806 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2807
07b287a0
MS
2808 /* Frame handling. */
2809 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2810 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2811 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2812
2813 /* Advance PC across function entry code. */
2814 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2815
2816 /* The stack grows downward. */
2817 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2818
2819 /* Breakpoint manipulation. */
2820 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
07b287a0 2821 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2822 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
2823
2824 /* Information about registers, etc. */
2825 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2826 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2827 set_gdbarch_num_regs (gdbarch, num_regs);
2828
2829 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2830 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2831 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2832 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2833 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2834 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2835 aarch64_pseudo_register_reggroup_p);
2836
2837 /* ABI */
2838 set_gdbarch_short_bit (gdbarch, 16);
2839 set_gdbarch_int_bit (gdbarch, 32);
2840 set_gdbarch_float_bit (gdbarch, 32);
2841 set_gdbarch_double_bit (gdbarch, 64);
2842 set_gdbarch_long_double_bit (gdbarch, 128);
2843 set_gdbarch_long_bit (gdbarch, 64);
2844 set_gdbarch_long_long_bit (gdbarch, 64);
2845 set_gdbarch_ptr_bit (gdbarch, 64);
2846 set_gdbarch_char_signed (gdbarch, 0);
2847 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2848 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2849 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2850
2851 /* Internal <-> external register number maps. */
2852 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2853
2854 /* Returning results. */
2855 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2856
2857 /* Disassembly. */
2858 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2859
2860 /* Virtual tables. */
2861 set_gdbarch_vbit_in_delta (gdbarch, 1);
2862
2863 /* Hook in the ABI-specific overrides, if they have been registered. */
2864 info.target_desc = tdesc;
2865 info.tdep_info = (void *) tdesc_data;
2866 gdbarch_init_osabi (info, gdbarch);
2867
2868 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2869
2870 /* Add some default predicates. */
2871 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2872 dwarf2_append_unwinders (gdbarch);
2873 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2874
2875 frame_base_set_default (gdbarch, &aarch64_normal_base);
2876
2877 /* Now we have tuned the configuration, set a few final things,
2878 based on what the OS ABI has told us. */
2879
2880 if (tdep->jb_pc >= 0)
2881 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2882
ea873d8e
PL
2883 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2884
07b287a0
MS
2885 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2886
2887 /* Add standard register aliases. */
2888 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2889 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2890 value_of_aarch64_user_reg,
2891 &aarch64_register_aliases[i].regnum);
2892
2893 return gdbarch;
2894}
2895
2896static void
2897aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2898{
2899 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2900
2901 if (tdep == NULL)
2902 return;
2903
2904 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2905 paddress (gdbarch, tdep->lowest_pc));
2906}
2907
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 architecture with the
   gdbarch framework, build the default target description, and add
   the "set/show debug aarch64" maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2928
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-field helpers for decoding 32-bit instruction words:
   submask(x)       - mask covering bits [0, x];
   bit(obj,st)      - single bit ST of OBJ;
   bits(obj,st,fn)  - bit-field [ST, FN] of OBJ, shifted down to bit 0.  */

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate REGS as an array of LENGTH register numbers and fill it
   from RECORD_BUF.  No-op when LENGTH is zero.  The caller owns the
   resulting allocation.  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS as an array of LENGTH struct aarch64_mem_r and fill
   it from RECORD_BUF.  No-op when LENGTH is zero.
   NOTE(review): the destination is &MEMS->len, i.e. the first member
   of the first element — equivalent to MEMS itself here, but writing
   MEMS directly would be clearer; confirm before changing.  */

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2959
/* AArch64 record/replay structures and enumerations.  */

/* One memory range clobbered by the instruction being recorded.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-class record handlers.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Working state threaded through the record handlers while a single
   instruction is decoded: the instruction itself plus the register
   and memory records accumulated for it.  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2987
/* Record handler for data processing - register instructions.

   Decode which registers the instruction will modify (destination
   register and, for flag-setting forms, CPSR) and store them via
   REG_ALLOC so record-replay can preserve their old values.  Returns
   AARCH64_RECORD_SUCCESS or AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      /* NOTE(review): if bit 28 is set but bits 24-27 match neither
	 0x0b nor 0x0a, control falls through to REG_ALLOC without
	 reg_rec_count being set here — presumably the caller zeroes
	 the record state beforehand; confirm.  */
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3063
3064/* Record handler for data processing - immediate instructions. */
3065
3066static unsigned int
3067aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3068{
3069 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3070 uint32_t record_buf[4];
3071
3072 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3073 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3074 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3075 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3076
3077 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3078 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3079 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3080 {
3081 record_buf[0] = reg_rd;
3082 aarch64_insn_r->reg_rec_count = 1;
3083 }
3084 else if (insn_bits24_27 == 0x01)
3085 {
3086 /* Add/Subtract (immediate). */
3087 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3088 record_buf[0] = reg_rd;
3089 aarch64_insn_r->reg_rec_count = 1;
3090 if (setflags)
3091 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3092 }
3093 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3094 {
3095 /* Logical (immediate). */
3096 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3097 record_buf[0] = reg_rd;
3098 aarch64_insn_r->reg_rec_count = 1;
3099 if (setflags)
3100 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3101 }
3102 else
3103 return AARCH64_RECORD_UNKNOWN;
3104
3105 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3106 record_buf);
3107 return AARCH64_RECORD_SUCCESS;
3108}
3109
3110/* Record handler for branch, exception generation and system instructions. */
3111
3112static unsigned int
3113aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3114{
3115 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3116 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3117 uint32_t record_buf[4];
3118
3119 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3120 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3121 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3122
3123 if (insn_bits28_31 == 0x0d)
3124 {
3125 /* Exception generation instructions. */
3126 if (insn_bits24_27 == 0x04)
3127 {
5d98d3cd
YQ
3128 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3129 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3130 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
99afc88b
OJ
3131 {
3132 ULONGEST svc_number;
3133
3134 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3135 &svc_number);
3136 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3137 svc_number);
3138 }
3139 else
3140 return AARCH64_RECORD_UNSUPPORTED;
3141 }
3142 /* System instructions. */
3143 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3144 {
3145 uint32_t reg_rt, reg_crn;
3146
3147 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3148 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3149
3150 /* Record rt in case of sysl and mrs instructions. */
3151 if (bit (aarch64_insn_r->aarch64_insn, 21))
3152 {
3153 record_buf[0] = reg_rt;
3154 aarch64_insn_r->reg_rec_count = 1;
3155 }
3156 /* Record cpsr for hint and msr(immediate) instructions. */
3157 else if (reg_crn == 0x02 || reg_crn == 0x04)
3158 {
3159 record_buf[0] = AARCH64_CPSR_REGNUM;
3160 aarch64_insn_r->reg_rec_count = 1;
3161 }
3162 }
3163 /* Unconditional branch (register). */
3164 else if((insn_bits24_27 & 0x0e) == 0x06)
3165 {
3166 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3167 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3168 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3169 }
3170 else
3171 return AARCH64_RECORD_UNKNOWN;
3172 }
3173 /* Unconditional branch (immediate). */
3174 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3175 {
3176 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3177 if (bit (aarch64_insn_r->aarch64_insn, 31))
3178 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3179 }
3180 else
3181 /* Compare & branch (immediate), Test & branch (immediate) and
3182 Conditional branch (immediate). */
3183 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3184
3185 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3186 record_buf);
3187 return AARCH64_RECORD_SUCCESS;
3188}
3189
3190/* Record handler for advanced SIMD load and store instructions. */
3191
3192static unsigned int
3193aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3194{
3195 CORE_ADDR address;
3196 uint64_t addr_offset = 0;
3197 uint32_t record_buf[24];
3198 uint64_t record_buf_mem[24];
3199 uint32_t reg_rn, reg_rt;
3200 uint32_t reg_index = 0, mem_index = 0;
3201 uint8_t opcode_bits, size_bits;
3202
3203 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3204 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3205 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3206 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3207 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3208
3209 if (record_debug)
b277c936 3210 debug_printf ("Process record: Advanced SIMD load/store\n");
99afc88b
OJ
3211
3212 /* Load/store single structure. */
3213 if (bit (aarch64_insn_r->aarch64_insn, 24))
3214 {
3215 uint8_t sindex, scale, selem, esize, replicate = 0;
3216 scale = opcode_bits >> 2;
3217 selem = ((opcode_bits & 0x02) |
3218 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3219 switch (scale)
3220 {
3221 case 1:
3222 if (size_bits & 0x01)
3223 return AARCH64_RECORD_UNKNOWN;
3224 break;
3225 case 2:
3226 if ((size_bits >> 1) & 0x01)
3227 return AARCH64_RECORD_UNKNOWN;
3228 if (size_bits & 0x01)
3229 {
3230 if (!((opcode_bits >> 1) & 0x01))
3231 scale = 3;
3232 else
3233 return AARCH64_RECORD_UNKNOWN;
3234 }
3235 break;
3236 case 3:
3237 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3238 {
3239 scale = size_bits;
3240 replicate = 1;
3241 break;
3242 }
3243 else
3244 return AARCH64_RECORD_UNKNOWN;
3245 default:
3246 break;
3247 }
3248 esize = 8 << scale;
3249 if (replicate)
3250 for (sindex = 0; sindex < selem; sindex++)
3251 {
3252 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3253 reg_rt = (reg_rt + 1) % 32;
3254 }
3255 else
3256 {
3257 for (sindex = 0; sindex < selem; sindex++)
3258 if (bit (aarch64_insn_r->aarch64_insn, 22))
3259 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3260 else
3261 {
3262 record_buf_mem[mem_index++] = esize / 8;
3263 record_buf_mem[mem_index++] = address + addr_offset;
3264 }
3265 addr_offset = addr_offset + (esize / 8);
3266 reg_rt = (reg_rt + 1) % 32;
3267 }
3268 }
3269 /* Load/store multiple structure. */
3270 else
3271 {
3272 uint8_t selem, esize, rpt, elements;
3273 uint8_t eindex, rindex;
3274
3275 esize = 8 << size_bits;
3276 if (bit (aarch64_insn_r->aarch64_insn, 30))
3277 elements = 128 / esize;
3278 else
3279 elements = 64 / esize;
3280
3281 switch (opcode_bits)
3282 {
3283 /*LD/ST4 (4 Registers). */
3284 case 0:
3285 rpt = 1;
3286 selem = 4;
3287 break;
3288 /*LD/ST1 (4 Registers). */
3289 case 2:
3290 rpt = 4;
3291 selem = 1;
3292 break;
3293 /*LD/ST3 (3 Registers). */
3294 case 4:
3295 rpt = 1;
3296 selem = 3;
3297 break;
3298 /*LD/ST1 (3 Registers). */
3299 case 6:
3300 rpt = 3;
3301 selem = 1;
3302 break;
3303 /*LD/ST1 (1 Register). */
3304 case 7:
3305 rpt = 1;
3306 selem = 1;
3307 break;
3308 /*LD/ST2 (2 Registers). */
3309 case 8:
3310 rpt = 1;
3311 selem = 2;
3312 break;
3313 /*LD/ST1 (2 Registers). */
3314 case 10:
3315 rpt = 2;
3316 selem = 1;
3317 break;
3318 default:
3319 return AARCH64_RECORD_UNSUPPORTED;
3320 break;
3321 }
3322 for (rindex = 0; rindex < rpt; rindex++)
3323 for (eindex = 0; eindex < elements; eindex++)
3324 {
3325 uint8_t reg_tt, sindex;
3326 reg_tt = (reg_rt + rindex) % 32;
3327 for (sindex = 0; sindex < selem; sindex++)
3328 {
3329 if (bit (aarch64_insn_r->aarch64_insn, 22))
3330 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3331 else
3332 {
3333 record_buf_mem[mem_index++] = esize / 8;
3334 record_buf_mem[mem_index++] = address + addr_offset;
3335 }
3336 addr_offset = addr_offset + (esize / 8);
3337 reg_tt = (reg_tt + 1) % 32;
3338 }
3339 }
3340 }
3341
3342 if (bit (aarch64_insn_r->aarch64_insn, 23))
3343 record_buf[reg_index++] = reg_rn;
3344
3345 aarch64_insn_r->reg_rec_count = reg_index;
3346 aarch64_insn_r->mem_rec_count = mem_index / 2;
3347 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3348 record_buf_mem);
3349 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3350 record_buf);
3351 return AARCH64_RECORD_SUCCESS;
3352}
3353
3354/* Record handler for load and store instructions. */
3355
3356static unsigned int
3357aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3358{
3359 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3360 uint8_t insn_bit23, insn_bit21;
3361 uint8_t opc, size_bits, ld_flag, vector_flag;
3362 uint32_t reg_rn, reg_rt, reg_rt2;
3363 uint64_t datasize, offset;
3364 uint32_t record_buf[8];
3365 uint64_t record_buf_mem[8];
3366 CORE_ADDR address;
3367
3368 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3369 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3370 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3371 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3372 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3373 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3374 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3375 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3376 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3377 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3378 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3379
3380 /* Load/store exclusive. */
3381 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3382 {
3383 if (record_debug)
b277c936 3384 debug_printf ("Process record: load/store exclusive\n");
99afc88b
OJ
3385
3386 if (ld_flag)
3387 {
3388 record_buf[0] = reg_rt;
3389 aarch64_insn_r->reg_rec_count = 1;
3390 if (insn_bit21)
3391 {
3392 record_buf[1] = reg_rt2;
3393 aarch64_insn_r->reg_rec_count = 2;
3394 }
3395 }
3396 else
3397 {
3398 if (insn_bit21)
3399 datasize = (8 << size_bits) * 2;
3400 else
3401 datasize = (8 << size_bits);
3402 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3403 &address);
3404 record_buf_mem[0] = datasize / 8;
3405 record_buf_mem[1] = address;
3406 aarch64_insn_r->mem_rec_count = 1;
3407 if (!insn_bit23)
3408 {
3409 /* Save register rs. */
3410 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3411 aarch64_insn_r->reg_rec_count = 1;
3412 }
3413 }
3414 }
3415 /* Load register (literal) instructions decoding. */
3416 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3417 {
3418 if (record_debug)
b277c936 3419 debug_printf ("Process record: load register (literal)\n");
99afc88b
OJ
3420 if (vector_flag)
3421 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3422 else
3423 record_buf[0] = reg_rt;
3424 aarch64_insn_r->reg_rec_count = 1;
3425 }
3426 /* All types of load/store pair instructions decoding. */
3427 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3428 {
3429 if (record_debug)
b277c936 3430 debug_printf ("Process record: load/store pair\n");
99afc88b
OJ
3431
3432 if (ld_flag)
3433 {
3434 if (vector_flag)
3435 {
3436 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3437 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3438 }
3439 else
3440 {
3441 record_buf[0] = reg_rt;
3442 record_buf[1] = reg_rt2;
3443 }
3444 aarch64_insn_r->reg_rec_count = 2;
3445 }
3446 else
3447 {
3448 uint16_t imm7_off;
3449 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3450 if (!vector_flag)
3451 size_bits = size_bits >> 1;
3452 datasize = 8 << (2 + size_bits);
3453 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3454 offset = offset << (2 + size_bits);
3455 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3456 &address);
3457 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3458 {
3459 if (imm7_off & 0x40)
3460 address = address - offset;
3461 else
3462 address = address + offset;
3463 }
3464
3465 record_buf_mem[0] = datasize / 8;
3466 record_buf_mem[1] = address;
3467 record_buf_mem[2] = datasize / 8;
3468 record_buf_mem[3] = address + (datasize / 8);
3469 aarch64_insn_r->mem_rec_count = 2;
3470 }
3471 if (bit (aarch64_insn_r->aarch64_insn, 23))
3472 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3473 }
3474 /* Load/store register (unsigned immediate) instructions. */
3475 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3476 {
3477 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3478 if (!(opc >> 1))
3479 if (opc & 0x01)
3480 ld_flag = 0x01;
3481 else
3482 ld_flag = 0x0;
3483 else
3484 if (size_bits != 0x03)
3485 ld_flag = 0x01;
3486 else
3487 return AARCH64_RECORD_UNKNOWN;
3488
3489 if (record_debug)
3490 {
b277c936
PL
3491 debug_printf ("Process record: load/store (unsigned immediate):"
3492 " size %x V %d opc %x\n", size_bits, vector_flag,
3493 opc);
99afc88b
OJ
3494 }
3495
3496 if (!ld_flag)
3497 {
3498 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3499 datasize = 8 << size_bits;
3500 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3501 &address);
3502 offset = offset << size_bits;
3503 address = address + offset;
3504
3505 record_buf_mem[0] = datasize >> 3;
3506 record_buf_mem[1] = address;
3507 aarch64_insn_r->mem_rec_count = 1;
3508 }
3509 else
3510 {
3511 if (vector_flag)
3512 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3513 else
3514 record_buf[0] = reg_rt;
3515 aarch64_insn_r->reg_rec_count = 1;
3516 }
3517 }
3518 /* Load/store register (register offset) instructions. */
5d98d3cd
YQ
3519 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3520 && insn_bits10_11 == 0x02 && insn_bit21)
99afc88b
OJ
3521 {
3522 if (record_debug)
b277c936 3523 debug_printf ("Process record: load/store (register offset)\n");
99afc88b
OJ
3524 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3525 if (!(opc >> 1))
3526 if (opc & 0x01)
3527 ld_flag = 0x01;
3528 else
3529 ld_flag = 0x0;
3530 else
3531 if (size_bits != 0x03)
3532 ld_flag = 0x01;
3533 else
3534 return AARCH64_RECORD_UNKNOWN;
3535
3536 if (!ld_flag)
3537 {
3538 uint64_t reg_rm_val;
3539 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3540 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3541 if (bit (aarch64_insn_r->aarch64_insn, 12))
3542 offset = reg_rm_val << size_bits;
3543 else
3544 offset = reg_rm_val;
3545 datasize = 8 << size_bits;
3546 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3547 &address);
3548 address = address + offset;
3549 record_buf_mem[0] = datasize >> 3;
3550 record_buf_mem[1] = address;
3551 aarch64_insn_r->mem_rec_count = 1;
3552 }
3553 else
3554 {
3555 if (vector_flag)
3556 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3557 else
3558 record_buf[0] = reg_rt;
3559 aarch64_insn_r->reg_rec_count = 1;
3560 }
3561 }
3562 /* Load/store register (immediate and unprivileged) instructions. */
5d98d3cd
YQ
3563 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3564 && !insn_bit21)
99afc88b
OJ
3565 {
3566 if (record_debug)
3567 {
b277c936
PL
3568 debug_printf ("Process record: load/store "
3569 "(immediate and unprivileged)\n");
99afc88b
OJ
3570 }
3571 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3572 if (!(opc >> 1))
3573 if (opc & 0x01)
3574 ld_flag = 0x01;
3575 else
3576 ld_flag = 0x0;
3577 else
3578 if (size_bits != 0x03)
3579 ld_flag = 0x01;
3580 else
3581 return AARCH64_RECORD_UNKNOWN;
3582
3583 if (!ld_flag)
3584 {
3585 uint16_t imm9_off;
3586 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3587 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3588 datasize = 8 << size_bits;
3589 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3590 &address);
3591 if (insn_bits10_11 != 0x01)
3592 {
3593 if (imm9_off & 0x0100)
3594 address = address - offset;
3595 else
3596 address = address + offset;
3597 }
3598 record_buf_mem[0] = datasize >> 3;
3599 record_buf_mem[1] = address;
3600 aarch64_insn_r->mem_rec_count = 1;
3601 }
3602 else
3603 {
3604 if (vector_flag)
3605 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3606 else
3607 record_buf[0] = reg_rt;
3608 aarch64_insn_r->reg_rec_count = 1;
3609 }
3610 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3611 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3612 }
3613 /* Advanced SIMD load/store instructions. */
3614 else
3615 return aarch64_record_asimd_load_store (aarch64_insn_r);
3616
3617 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3618 record_buf_mem);
3619 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3620 record_buf);
3621 return AARCH64_RECORD_SUCCESS;
3622}
3623
3624/* Record handler for data processing SIMD and floating point instructions. */
3625
3626static unsigned int
3627aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3628{
3629 uint8_t insn_bit21, opcode, rmode, reg_rd;
3630 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3631 uint8_t insn_bits11_14;
3632 uint32_t record_buf[2];
3633
3634 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3635 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3636 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3637 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3638 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3639 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3640 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3641 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3642 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3643
3644 if (record_debug)
b277c936 3645 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
3646
3647 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3648 {
3649 /* Floating point - fixed point conversion instructions. */
3650 if (!insn_bit21)
3651 {
3652 if (record_debug)
b277c936 3653 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
3654
3655 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3656 record_buf[0] = reg_rd;
3657 else
3658 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3659 }
3660 /* Floating point - conditional compare instructions. */
3661 else if (insn_bits10_11 == 0x01)
3662 {
3663 if (record_debug)
b277c936 3664 debug_printf ("FP - conditional compare");
99afc88b
OJ
3665
3666 record_buf[0] = AARCH64_CPSR_REGNUM;
3667 }
3668 /* Floating point - data processing (2-source) and
3669 conditional select instructions. */
3670 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3671 {
3672 if (record_debug)
b277c936 3673 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
3674
3675 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3676 }
3677 else if (insn_bits10_11 == 0x00)
3678 {
3679 /* Floating point - immediate instructions. */
3680 if ((insn_bits12_15 & 0x01) == 0x01
3681 || (insn_bits12_15 & 0x07) == 0x04)
3682 {
3683 if (record_debug)
b277c936 3684 debug_printf ("FP - immediate");
99afc88b
OJ
3685 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3686 }
3687 /* Floating point - compare instructions. */
3688 else if ((insn_bits12_15 & 0x03) == 0x02)
3689 {
3690 if (record_debug)
b277c936 3691 debug_printf ("FP - immediate");
99afc88b
OJ
3692 record_buf[0] = AARCH64_CPSR_REGNUM;
3693 }
3694 /* Floating point - integer conversions instructions. */
f62fce35 3695 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3696 {
3697 /* Convert float to integer instruction. */
3698 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3699 {
3700 if (record_debug)
b277c936 3701 debug_printf ("float to int conversion");
99afc88b
OJ
3702
3703 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3704 }
3705 /* Convert integer to float instruction. */
3706 else if ((opcode >> 1) == 0x01 && !rmode)
3707 {
3708 if (record_debug)
b277c936 3709 debug_printf ("int to float conversion");
99afc88b
OJ
3710
3711 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3712 }
3713 /* Move float to integer instruction. */
3714 else if ((opcode >> 1) == 0x03)
3715 {
3716 if (record_debug)
b277c936 3717 debug_printf ("move float to int");
99afc88b
OJ
3718
3719 if (!(opcode & 0x01))
3720 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3721 else
3722 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3723 }
f62fce35
YQ
3724 else
3725 return AARCH64_RECORD_UNKNOWN;
99afc88b 3726 }
f62fce35
YQ
3727 else
3728 return AARCH64_RECORD_UNKNOWN;
99afc88b 3729 }
f62fce35
YQ
3730 else
3731 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3732 }
3733 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3734 {
3735 if (record_debug)
b277c936 3736 debug_printf ("SIMD copy");
99afc88b
OJ
3737
3738 /* Advanced SIMD copy instructions. */
3739 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3740 && !bit (aarch64_insn_r->aarch64_insn, 15)
3741 && bit (aarch64_insn_r->aarch64_insn, 10))
3742 {
3743 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3744 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3745 else
3746 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3747 }
3748 else
3749 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3750 }
3751 /* All remaining floating point or advanced SIMD instructions. */
3752 else
3753 {
3754 if (record_debug)
b277c936 3755 debug_printf ("all remain");
99afc88b
OJ
3756
3757 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3758 }
3759
3760 if (record_debug)
b277c936 3761 debug_printf ("\n");
99afc88b
OJ
3762
3763 aarch64_insn_r->reg_rec_count++;
3764 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3765 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3766 record_buf);
3767 return AARCH64_RECORD_SUCCESS;
3768}
3769
3770/* Decodes insns type and invokes its record handler. */
3771
3772static unsigned int
3773aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3774{
3775 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3776
3777 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3778 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3779 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3780 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3781
3782 /* Data processing - immediate instructions. */
3783 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3784 return aarch64_record_data_proc_imm (aarch64_insn_r);
3785
3786 /* Branch, exception generation and system instructions. */
3787 if (ins_bit26 && !ins_bit27 && ins_bit28)
3788 return aarch64_record_branch_except_sys (aarch64_insn_r);
3789
3790 /* Load and store instructions. */
3791 if (!ins_bit25 && ins_bit27)
3792 return aarch64_record_load_store (aarch64_insn_r);
3793
3794 /* Data processing - register instructions. */
3795 if (ins_bit25 && !ins_bit26 && ins_bit27)
3796 return aarch64_record_data_proc_reg (aarch64_insn_r);
3797
3798 /* Data processing - SIMD and floating point instructions. */
3799 if (ins_bit25 && ins_bit26 && ins_bit27)
3800 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3801
3802 return AARCH64_RECORD_UNSUPPORTED;
3803}
3804
3805/* Cleans up local record registers and memory allocations. */
3806
3807static void
3808deallocate_reg_mem (insn_decode_record *record)
3809{
3810 xfree (record->aarch64_regs);
3811 xfree (record->aarch64_mems);
3812}
3813
3814/* Parse the current instruction and record the values of the registers and
3815 memory that will be changed in current instruction to record_arch_list
3816 return -1 if something is wrong. */
3817
3818int
3819aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3820 CORE_ADDR insn_addr)
3821{
3822 uint32_t rec_no = 0;
3823 uint8_t insn_size = 4;
3824 uint32_t ret = 0;
3825 ULONGEST t_bit = 0, insn_id = 0;
3826 gdb_byte buf[insn_size];
3827 insn_decode_record aarch64_record;
3828
3829 memset (&buf[0], 0, insn_size);
3830 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3831 target_read_memory (insn_addr, &buf[0], insn_size);
3832 aarch64_record.aarch64_insn
3833 = (uint32_t) extract_unsigned_integer (&buf[0],
3834 insn_size,
3835 gdbarch_byte_order (gdbarch));
3836 aarch64_record.regcache = regcache;
3837 aarch64_record.this_addr = insn_addr;
3838 aarch64_record.gdbarch = gdbarch;
3839
3840 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3841 if (ret == AARCH64_RECORD_UNSUPPORTED)
3842 {
3843 printf_unfiltered (_("Process record does not support instruction "
3844 "0x%0x at address %s.\n"),
3845 aarch64_record.aarch64_insn,
3846 paddress (gdbarch, insn_addr));
3847 ret = -1;
3848 }
3849
3850 if (0 == ret)
3851 {
3852 /* Record registers. */
3853 record_full_arch_list_add_reg (aarch64_record.regcache,
3854 AARCH64_PC_REGNUM);
3855 /* Always record register CPSR. */
3856 record_full_arch_list_add_reg (aarch64_record.regcache,
3857 AARCH64_CPSR_REGNUM);
3858 if (aarch64_record.aarch64_regs)
3859 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3860 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3861 aarch64_record.aarch64_regs[rec_no]))
3862 ret = -1;
3863
3864 /* Record memories. */
3865 if (aarch64_record.aarch64_mems)
3866 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3867 if (record_full_arch_list_add_mem
3868 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3869 aarch64_record.aarch64_mems[rec_no].len))
3870 ret = -1;
3871
3872 if (record_full_arch_list_add_end ())
3873 ret = -1;
3874 }
3875
3876 deallocate_reg_mem (&aarch64_record);
3877 return ret;
3878}
This page took 0.3754 seconds and 4 git commands to generate.