gdb/aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57
58 #include "features/aarch64.c"
59
60 #include "arch/aarch64-insn.h"
61
62 #include "opcode/aarch64.h"
63
64 #define submask(x) ((1L << ((x) + 1)) - 1)
65 #define bit(obj,st) (((obj) >> (st)) & 1)
66 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
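
/* Editor's illustrative sketch (not part of the original file): how the
   helpers above pull fields out of an encoded instruction word.  The
   encoding 0x910043ff is "add sp, sp, #0x10" in the ARMv8 ADD-immediate
   layout.  */
#if 0	/* Example only; not compiled.  */
static void
example_bit_extraction (void)
{
  uint32_t insn = 0x910043ff;		/* add sp, sp, #0x10 */
  unsigned rd = bits (insn, 0, 4);	/* Rd field: 31 (sp).  */
  unsigned rn = bits (insn, 5, 9);	/* Rn field: 31 (sp).  */
  unsigned imm12 = bits (insn, 10, 21);	/* Immediate: 0x10.  */
  unsigned sh = bit (insn, 22);		/* LSL-#12 shift flag: 0.  */
}
#endif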
67
68 /* Pseudo register base numbers. */
69 #define AARCH64_Q0_REGNUM 0
70 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
71 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
72 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
73 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
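
/* Editor's note (illustrative): with these bases, the pseudo register
   for, e.g., "d5" is numbered gdbarch_num_regs (gdbarch)
   + AARCH64_D0_REGNUM + 5; the pseudo register accessors below
   subtract gdbarch_num_regs () before comparing against the bases.  */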
74
75 /* The standard register names, and all the valid aliases for them. */
76 static const struct
77 {
78 const char *const name;
79 int regnum;
80 } aarch64_register_aliases[] =
81 {
82 /* 64-bit register names. */
83 {"fp", AARCH64_FP_REGNUM},
84 {"lr", AARCH64_LR_REGNUM},
85 {"sp", AARCH64_SP_REGNUM},
86
87 /* 32-bit register names. */
88 {"w0", AARCH64_X0_REGNUM + 0},
89 {"w1", AARCH64_X0_REGNUM + 1},
90 {"w2", AARCH64_X0_REGNUM + 2},
91 {"w3", AARCH64_X0_REGNUM + 3},
92 {"w4", AARCH64_X0_REGNUM + 4},
93 {"w5", AARCH64_X0_REGNUM + 5},
94 {"w6", AARCH64_X0_REGNUM + 6},
95 {"w7", AARCH64_X0_REGNUM + 7},
96 {"w8", AARCH64_X0_REGNUM + 8},
97 {"w9", AARCH64_X0_REGNUM + 9},
98 {"w10", AARCH64_X0_REGNUM + 10},
99 {"w11", AARCH64_X0_REGNUM + 11},
100 {"w12", AARCH64_X0_REGNUM + 12},
101 {"w13", AARCH64_X0_REGNUM + 13},
102 {"w14", AARCH64_X0_REGNUM + 14},
103 {"w15", AARCH64_X0_REGNUM + 15},
104 {"w16", AARCH64_X0_REGNUM + 16},
105 {"w17", AARCH64_X0_REGNUM + 17},
106 {"w18", AARCH64_X0_REGNUM + 18},
107 {"w19", AARCH64_X0_REGNUM + 19},
108 {"w20", AARCH64_X0_REGNUM + 20},
109 {"w21", AARCH64_X0_REGNUM + 21},
110 {"w22", AARCH64_X0_REGNUM + 22},
111 {"w23", AARCH64_X0_REGNUM + 23},
112 {"w24", AARCH64_X0_REGNUM + 24},
113 {"w25", AARCH64_X0_REGNUM + 25},
114 {"w26", AARCH64_X0_REGNUM + 26},
115 {"w27", AARCH64_X0_REGNUM + 27},
116 {"w28", AARCH64_X0_REGNUM + 28},
117 {"w29", AARCH64_X0_REGNUM + 29},
118 {"w30", AARCH64_X0_REGNUM + 30},
119
120 /* Specials.  */
121 {"ip0", AARCH64_X0_REGNUM + 16},
122 {"ip1", AARCH64_X0_REGNUM + 17}
123 };
124
125 /* The required core 'R' registers. */
126 static const char *const aarch64_r_register_names[] =
127 {
128 /* These registers must appear in consecutive RAW register number
129 order and they must begin with AARCH64_X0_REGNUM! */
130 "x0", "x1", "x2", "x3",
131 "x4", "x5", "x6", "x7",
132 "x8", "x9", "x10", "x11",
133 "x12", "x13", "x14", "x15",
134 "x16", "x17", "x18", "x19",
135 "x20", "x21", "x22", "x23",
136 "x24", "x25", "x26", "x27",
137 "x28", "x29", "x30", "sp",
138 "pc", "cpsr"
139 };
140
141 /* The FP/SIMD 'V' registers. */
142 static const char *const aarch64_v_register_names[] =
143 {
144 /* These registers must appear in consecutive RAW register number
145 order and they must begin with AARCH64_V0_REGNUM! */
146 "v0", "v1", "v2", "v3",
147 "v4", "v5", "v6", "v7",
148 "v8", "v9", "v10", "v11",
149 "v12", "v13", "v14", "v15",
150 "v16", "v17", "v18", "v19",
151 "v20", "v21", "v22", "v23",
152 "v24", "v25", "v26", "v27",
153 "v28", "v29", "v30", "v31",
154 "fpsr",
155 "fpcr"
156 };
157
158 /* AArch64 prologue cache structure. */
159 struct aarch64_prologue_cache
160 {
161 /* The program counter at the start of the function. It is used to
162 identify this frame as a prologue frame. */
163 CORE_ADDR func;
164
165 /* The program counter at the time this frame was created; i.e. where
166 this function was called from. It is used to identify this frame as a
167 stub frame. */
168 CORE_ADDR prev_pc;
169
170 /* The stack pointer at the time this frame was created; i.e. the
171 caller's stack pointer when this function was called. It is used
172 to identify this frame. */
173 CORE_ADDR prev_sp;
174
175 /* Is the target available to read from? */
176 int available_p;
177
178 /* The frame base for this frame is just prev_sp - frame size.
179 FRAMESIZE is the distance from the frame pointer to the
180 initial stack pointer. */
181 int framesize;
182
183 /* The register used to hold the frame pointer for this frame. */
184 int framereg;
185
186 /* Saved register offsets. */
187 struct trad_frame_saved_reg *saved_regs;
188 };
189
190 static void
191 show_aarch64_debug (struct ui_file *file, int from_tty,
192 struct cmd_list_element *c, const char *value)
193 {
194 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
195 }
196
197 /* Analyze a prologue, looking for a recognizable stack frame
198 and frame pointer. Scan until we encounter a store that could
199 clobber the stack frame unexpectedly, or an unknown instruction. */
200
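/* Editor's note (illustrative): for a typical GCC-generated prologue
   such as

       sub  sp, sp, #0x30
       stp  x29, x30, [sp, #16]
       add  x29, sp, #16

   the analysis below tracks SP and FP symbolically, concluding that
   FRAMEREG is x29 with FRAMESIZE 0x20, and that the caller's x29 and
   x30 were saved at FP+0 and FP+8.  Exact offsets depend on the
   compiler.  */
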
201 static CORE_ADDR
202 aarch64_analyze_prologue (struct gdbarch *gdbarch,
203 CORE_ADDR start, CORE_ADDR limit,
204 struct aarch64_prologue_cache *cache)
205 {
206 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
207 int i;
208 pv_t regs[AARCH64_X_REGISTER_COUNT];
209 struct pv_area *stack;
210 struct cleanup *back_to;
211
212 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
213 regs[i] = pv_register (i, 0);
214 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
215 back_to = make_cleanup_free_pv_area (stack);
216
217 for (; start < limit; start += 4)
218 {
219 uint32_t insn;
220 aarch64_inst inst;
221
222 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
223
224 if (aarch64_decode_insn (insn, &inst, 1) != 0)
225 break;
226
227 if (inst.opcode->iclass == addsub_imm
228 && (inst.opcode->op == OP_ADD
229 || strcmp ("sub", inst.opcode->name) == 0))
230 {
231 unsigned rd = inst.operands[0].reg.regno;
232 unsigned rn = inst.operands[1].reg.regno;
233
234 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
235 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
236 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
237 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
238
239 if (inst.opcode->op == OP_ADD)
240 {
241 regs[rd] = pv_add_constant (regs[rn],
242 inst.operands[2].imm.value);
243 }
244 else
245 {
246 regs[rd] = pv_add_constant (regs[rn],
247 -inst.operands[2].imm.value);
248 }
249 }
250 else if (inst.opcode->iclass == pcreladdr
251 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
252 {
253 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
254 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
255
256 regs[inst.operands[0].reg.regno] = pv_unknown ();
257 }
258 else if (inst.opcode->iclass == branch_imm)
259 {
260 /* Stop analysis on branch. */
261 break;
262 }
263 else if (inst.opcode->iclass == condbranch)
264 {
265 /* Stop analysis on branch. */
266 break;
267 }
268 else if (inst.opcode->iclass == branch_reg)
269 {
270 /* Stop analysis on branch. */
271 break;
272 }
273 else if (inst.opcode->iclass == compbranch)
274 {
275 /* Stop analysis on branch. */
276 break;
277 }
278 else if (inst.opcode->op == OP_MOVZ)
279 {
280 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
281 regs[inst.operands[0].reg.regno] = pv_unknown ();
282 }
283 else if (inst.opcode->iclass == log_shift
284 && strcmp (inst.opcode->name, "orr") == 0)
285 {
286 unsigned rd = inst.operands[0].reg.regno;
287 unsigned rn = inst.operands[1].reg.regno;
288 unsigned rm = inst.operands[2].reg.regno;
289
290 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
291 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
292 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
293
294 if (inst.operands[2].shifter.amount == 0
295 && rn == AARCH64_SP_REGNUM)
296 regs[rd] = regs[rm];
297 else
298 {
299 if (aarch64_debug)
300 {
301 debug_printf ("aarch64: prologue analysis gave up "
302 "addr=0x%s opcode=0x%x (orr x register)\n",
303 core_addr_to_string_nz (start), insn);
304 }
305 break;
306 }
307 }
308 else if (inst.opcode->op == OP_STUR)
309 {
310 unsigned rt = inst.operands[0].reg.regno;
311 unsigned rn = inst.operands[1].addr.base_regno;
312 int is64
313 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
314
315 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
316 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
317 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
318 gdb_assert (!inst.operands[1].addr.offset.is_reg);
319
320 pv_area_store (stack, pv_add_constant (regs[rn],
321 inst.operands[1].addr.offset.imm),
322 is64 ? 8 : 4, regs[rt]);
323 }
324 else if ((inst.opcode->iclass == ldstpair_off
325 || inst.opcode->iclass == ldstpair_indexed)
326 && inst.operands[2].addr.preind
327 && strcmp ("stp", inst.opcode->name) == 0)
328 {
329 unsigned rt1 = inst.operands[0].reg.regno;
330 unsigned rt2 = inst.operands[1].reg.regno;
331 unsigned rn = inst.operands[2].addr.base_regno;
332 int32_t imm = inst.operands[2].addr.offset.imm;
333
334 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
335 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
336 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
337 gdb_assert (!inst.operands[2].addr.offset.is_reg);
338
339 /* If recording this store would invalidate the store area
340 (perhaps because rn is not known) then we should abandon
341 further prologue analysis. */
342 if (pv_area_store_would_trash (stack,
343 pv_add_constant (regs[rn], imm)))
344 break;
345
346 if (pv_area_store_would_trash (stack,
347 pv_add_constant (regs[rn], imm + 8)))
348 break;
349
350 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
351 regs[rt1]);
352 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
353 regs[rt2]);
354
355 if (inst.operands[2].addr.writeback)
356 regs[rn] = pv_add_constant (regs[rn], imm);
357
358 }
359 else if (inst.opcode->iclass == testbranch)
360 {
361 /* Stop analysis on branch. */
362 break;
363 }
364 else
365 {
366 if (aarch64_debug)
367 {
368 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
369 " opcode=0x%x\n",
370 core_addr_to_string_nz (start), insn);
371 }
372 break;
373 }
374 }
375
376 if (cache == NULL)
377 {
378 do_cleanups (back_to);
379 return start;
380 }
381
382 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
383 {
384 /* Frame pointer is fp. Frame size is constant. */
385 cache->framereg = AARCH64_FP_REGNUM;
386 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
387 }
388 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
389 {
390 /* Try the stack pointer. */
391 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
392 cache->framereg = AARCH64_SP_REGNUM;
393 }
394 else
395 {
396 /* We're just out of luck. We don't know where the frame is. */
397 cache->framereg = -1;
398 cache->framesize = 0;
399 }
400
401 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
402 {
403 CORE_ADDR offset;
404
405 if (pv_area_find_reg (stack, gdbarch, i, &offset))
406 cache->saved_regs[i].addr = offset;
407 }
408
409 do_cleanups (back_to);
410 return start;
411 }
412
413 /* Implement the "skip_prologue" gdbarch method. */
414
415 static CORE_ADDR
416 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
417 {
418 unsigned long inst;
419 CORE_ADDR skip_pc;
420 CORE_ADDR func_addr, limit_pc;
421 struct symtab_and_line sal;
422
423 /* See if we can determine the end of the prologue via the symbol
424 table. If so, then return either PC, or the PC after the
425 prologue, whichever is greater. */
426 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
427 {
428 CORE_ADDR post_prologue_pc
429 = skip_prologue_using_sal (gdbarch, func_addr);
430
431 if (post_prologue_pc != 0)
432 return max (pc, post_prologue_pc);
433 }
434
435 /* Can't determine the prologue from the symbol table; need to examine
436 instructions. */
437
438 /* Find an upper limit on the function prologue using the debug
439 information. If the debug information could not be used to
440 provide that bound, then use an arbitrary large number as the
441 upper bound. */
442 limit_pc = skip_prologue_using_sal (gdbarch, pc);
443 if (limit_pc == 0)
444 limit_pc = pc + 128; /* Magic. */
445
446 /* Try disassembling prologue. */
447 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
448 }
449
450 /* Scan the function prologue for THIS_FRAME and populate the prologue
451 cache CACHE. */
452
453 static void
454 aarch64_scan_prologue (struct frame_info *this_frame,
455 struct aarch64_prologue_cache *cache)
456 {
457 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
458 CORE_ADDR prologue_start;
459 CORE_ADDR prologue_end;
460 CORE_ADDR prev_pc = get_frame_pc (this_frame);
461 struct gdbarch *gdbarch = get_frame_arch (this_frame);
462
463 cache->prev_pc = prev_pc;
464
465 /* Assume we do not find a frame. */
466 cache->framereg = -1;
467 cache->framesize = 0;
468
469 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
470 &prologue_end))
471 {
472 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
473
474 if (sal.line == 0)
475 {
476 /* No line info so use the current PC. */
477 prologue_end = prev_pc;
478 }
479 else if (sal.end < prologue_end)
480 {
481 /* The next line begins after the function end. */
482 prologue_end = sal.end;
483 }
484
485 prologue_end = min (prologue_end, prev_pc);
486 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
487 }
488 else
489 {
490 CORE_ADDR frame_loc;
491 LONGEST saved_fp;
492 LONGEST saved_lr;
493 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
494
495 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
496 if (frame_loc == 0)
497 return;
498
499 cache->framereg = AARCH64_FP_REGNUM;
500 cache->framesize = 16;
501 cache->saved_regs[29].addr = 0;
502 cache->saved_regs[30].addr = 8;
503 }
504 }
505
506 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
507 function may throw an exception if the inferior's registers or memory is
508 not available. */
509
510 static void
511 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
512 struct aarch64_prologue_cache *cache)
513 {
514 CORE_ADDR unwound_fp;
515 int reg;
516
517 aarch64_scan_prologue (this_frame, cache);
518
519 if (cache->framereg == -1)
520 return;
521
522 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
523 if (unwound_fp == 0)
524 return;
525
526 cache->prev_sp = unwound_fp + cache->framesize;
527
528 /* Calculate actual addresses of saved registers using offsets
529 determined by aarch64_analyze_prologue. */
530 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
531 if (trad_frame_addr_p (cache->saved_regs, reg))
532 cache->saved_regs[reg].addr += cache->prev_sp;
533
534 cache->func = get_frame_func (this_frame);
535
536 cache->available_p = 1;
537 }
538
539 /* Allocate and fill in *THIS_CACHE with information about the prologue of
540 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
541 Return a pointer to the current aarch64_prologue_cache in
542 *THIS_CACHE. */
543
544 static struct aarch64_prologue_cache *
545 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
546 {
547 struct aarch64_prologue_cache *cache;
548
549 if (*this_cache != NULL)
550 return (struct aarch64_prologue_cache *) *this_cache;
551
552 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
553 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
554 *this_cache = cache;
555
556 TRY
557 {
558 aarch64_make_prologue_cache_1 (this_frame, cache);
559 }
560 CATCH (ex, RETURN_MASK_ERROR)
561 {
562 if (ex.error != NOT_AVAILABLE_ERROR)
563 throw_exception (ex);
564 }
565 END_CATCH
566
567 return cache;
568 }
569
570 /* Implement the "stop_reason" frame_unwind method. */
571
572 static enum unwind_stop_reason
573 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
574 void **this_cache)
575 {
576 struct aarch64_prologue_cache *cache
577 = aarch64_make_prologue_cache (this_frame, this_cache);
578
579 if (!cache->available_p)
580 return UNWIND_UNAVAILABLE;
581
582 /* Halt the backtrace at "_start". */
583 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
584 return UNWIND_OUTERMOST;
585
586 /* We've hit a wall, stop. */
587 if (cache->prev_sp == 0)
588 return UNWIND_OUTERMOST;
589
590 return UNWIND_NO_REASON;
591 }
592
593 /* Our frame ID for a normal frame is the current function's starting
594 PC and the caller's SP when we were called. */
595
596 static void
597 aarch64_prologue_this_id (struct frame_info *this_frame,
598 void **this_cache, struct frame_id *this_id)
599 {
600 struct aarch64_prologue_cache *cache
601 = aarch64_make_prologue_cache (this_frame, this_cache);
602
603 if (!cache->available_p)
604 *this_id = frame_id_build_unavailable_stack (cache->func);
605 else
606 *this_id = frame_id_build (cache->prev_sp, cache->func);
607 }
608
609 /* Implement the "prev_register" frame_unwind method. */
610
611 static struct value *
612 aarch64_prologue_prev_register (struct frame_info *this_frame,
613 void **this_cache, int prev_regnum)
614 {
615 struct gdbarch *gdbarch = get_frame_arch (this_frame);
616 struct aarch64_prologue_cache *cache
617 = aarch64_make_prologue_cache (this_frame, this_cache);
618
619 /* If we are asked to unwind the PC, then we need to return the LR
620 instead. The prologue may save PC, but it will point into this
621 frame's prologue, not the next frame's resume location. */
622 if (prev_regnum == AARCH64_PC_REGNUM)
623 {
624 CORE_ADDR lr;
625
626 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
627 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
628 }
629
630 /* SP is generally not saved to the stack, but this frame is
631 identified by the next frame's stack pointer at the time of the
632 call. The value was already reconstructed into PREV_SP. */
633 /*
634 +----------+ ^
635 | saved lr | |
636 +->| saved fp |--+
637 | | |
638 | | | <- Previous SP
639 | +----------+
640 | | saved lr |
641 +--| saved fp |<- FP
642 | |
643 | |<- SP
644 +----------+ */
645 if (prev_regnum == AARCH64_SP_REGNUM)
646 return frame_unwind_got_constant (this_frame, prev_regnum,
647 cache->prev_sp);
648
649 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
650 prev_regnum);
651 }
652
653 /* AArch64 prologue unwinder. */
654 struct frame_unwind aarch64_prologue_unwind =
655 {
656 NORMAL_FRAME,
657 aarch64_prologue_frame_unwind_stop_reason,
658 aarch64_prologue_this_id,
659 aarch64_prologue_prev_register,
660 NULL,
661 default_frame_sniffer
662 };
663
664 /* Allocate and fill in *THIS_CACHE with information about the prologue of
665 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
666 Return a pointer to the current aarch64_prologue_cache in
667 *THIS_CACHE. */
668
669 static struct aarch64_prologue_cache *
670 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
671 {
672 struct aarch64_prologue_cache *cache;
673
674 if (*this_cache != NULL)
675 return (struct aarch64_prologue_cache *) *this_cache;
676
677 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
678 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
679 *this_cache = cache;
680
681 TRY
682 {
683 cache->prev_sp = get_frame_register_unsigned (this_frame,
684 AARCH64_SP_REGNUM);
685 cache->prev_pc = get_frame_pc (this_frame);
686 cache->available_p = 1;
687 }
688 CATCH (ex, RETURN_MASK_ERROR)
689 {
690 if (ex.error != NOT_AVAILABLE_ERROR)
691 throw_exception (ex);
692 }
693 END_CATCH
694
695 return cache;
696 }
697
698 /* Implement the "stop_reason" frame_unwind method. */
699
700 static enum unwind_stop_reason
701 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
702 void **this_cache)
703 {
704 struct aarch64_prologue_cache *cache
705 = aarch64_make_stub_cache (this_frame, this_cache);
706
707 if (!cache->available_p)
708 return UNWIND_UNAVAILABLE;
709
710 return UNWIND_NO_REASON;
711 }
712
713 /* Our frame ID for a stub frame is the current SP and LR. */
714
715 static void
716 aarch64_stub_this_id (struct frame_info *this_frame,
717 void **this_cache, struct frame_id *this_id)
718 {
719 struct aarch64_prologue_cache *cache
720 = aarch64_make_stub_cache (this_frame, this_cache);
721
722 if (cache->available_p)
723 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
724 else
725 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
726 }
727
728 /* Implement the "sniffer" frame_unwind method. */
729
730 static int
731 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
732 struct frame_info *this_frame,
733 void **this_prologue_cache)
734 {
735 CORE_ADDR addr_in_block;
736 gdb_byte dummy[4];
737
738 addr_in_block = get_frame_address_in_block (this_frame);
739 if (in_plt_section (addr_in_block)
740 /* We also use the stub unwinder if the target memory is unreadable
741 to avoid having the prologue unwinder try to read it. */
742 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
743 return 1;
744
745 return 0;
746 }
747
748 /* AArch64 stub unwinder. */
749 struct frame_unwind aarch64_stub_unwind =
750 {
751 NORMAL_FRAME,
752 aarch64_stub_frame_unwind_stop_reason,
753 aarch64_stub_this_id,
754 aarch64_prologue_prev_register,
755 NULL,
756 aarch64_stub_unwind_sniffer
757 };
758
759 /* Return the frame base address of *THIS_FRAME. */
760
761 static CORE_ADDR
762 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
763 {
764 struct aarch64_prologue_cache *cache
765 = aarch64_make_prologue_cache (this_frame, this_cache);
766
767 return cache->prev_sp - cache->framesize;
768 }
769
770 /* AArch64 default frame base information. */
771 struct frame_base aarch64_normal_base =
772 {
773 &aarch64_prologue_unwind,
774 aarch64_normal_frame_base,
775 aarch64_normal_frame_base,
776 aarch64_normal_frame_base
777 };
778
779 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
780 dummy frame. The frame ID's base needs to match the TOS value
781 saved by save_dummy_frame_tos () and returned from
782 aarch64_push_dummy_call, and the PC needs to match the dummy
783 frame's breakpoint. */
784
785 static struct frame_id
786 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
787 {
788 return frame_id_build (get_frame_register_unsigned (this_frame,
789 AARCH64_SP_REGNUM),
790 get_frame_pc (this_frame));
791 }
792
793 /* Implement the "unwind_pc" gdbarch method. */
794
795 static CORE_ADDR
796 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
797 {
798 CORE_ADDR pc
799 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
800
801 return pc;
802 }
803
804 /* Implement the "unwind_sp" gdbarch method. */
805
806 static CORE_ADDR
807 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
808 {
809 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
810 }
811
812 /* Return the value of the REGNUM register in the previous frame of
813 *THIS_FRAME. */
814
815 static struct value *
816 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
817 void **this_cache, int regnum)
818 {
819 struct gdbarch *gdbarch = get_frame_arch (this_frame);
820 CORE_ADDR lr;
821
822 switch (regnum)
823 {
824 case AARCH64_PC_REGNUM:
825 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
826 return frame_unwind_got_constant (this_frame, regnum, lr);
827
828 default:
829 internal_error (__FILE__, __LINE__,
830 _("Unexpected register %d"), regnum);
831 }
832 }
833
834 /* Implement the "init_reg" dwarf2_frame_ops method. */
835
836 static void
837 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
838 struct dwarf2_frame_state_reg *reg,
839 struct frame_info *this_frame)
840 {
841 switch (regnum)
842 {
843 case AARCH64_PC_REGNUM:
844 reg->how = DWARF2_FRAME_REG_FN;
845 reg->loc.fn = aarch64_dwarf2_prev_register;
846 break;
847 case AARCH64_SP_REGNUM:
848 reg->how = DWARF2_FRAME_REG_CFA;
849 break;
850 }
851 }
852
853 /* When arguments must be pushed onto the stack, they go on in reverse
854 order. The code below implements a FILO (stack) to do this. */
855
856 typedef struct
857 {
858 /* Value to pass on stack. */
859 const gdb_byte *data;
860
861 /* Size in bytes of value to pass on stack. */
862 int len;
863 } stack_item_t;
864
865 DEF_VEC_O (stack_item_t);
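
/* Editor's note (illustrative): pass_on_stack () below pushes items in
   left-to-right argument order; aarch64_push_dummy_call () then pops
   them last-in/first-out while decrementing SP, so the first argument
   ends up at the lowest stack address, as the AAPCS64 requires.  */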
866
867 /* Return the alignment (in bytes) of the given type. */
868
869 static int
870 aarch64_type_align (struct type *t)
871 {
872 int n;
873 int align;
874 int falign;
875
876 t = check_typedef (t);
877 switch (TYPE_CODE (t))
878 {
879 default:
880 /* Should never happen. */
881 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
882 return 4;
883
884 case TYPE_CODE_PTR:
885 case TYPE_CODE_ENUM:
886 case TYPE_CODE_INT:
887 case TYPE_CODE_FLT:
888 case TYPE_CODE_SET:
889 case TYPE_CODE_RANGE:
890 case TYPE_CODE_BITSTRING:
891 case TYPE_CODE_REF:
892 case TYPE_CODE_CHAR:
893 case TYPE_CODE_BOOL:
894 return TYPE_LENGTH (t);
895
896 case TYPE_CODE_ARRAY:
897 if (TYPE_VECTOR (t))
898 {
899 /* Use the natural alignment for vector types (the same as for
900 scalar types), but the maximum alignment is 128 bits. */
901 if (TYPE_LENGTH (t) > 16)
902 return 16;
903 else
904 return TYPE_LENGTH (t);
905 }
906 else
907 return aarch64_type_align (TYPE_TARGET_TYPE (t));
908 case TYPE_CODE_COMPLEX:
909 return aarch64_type_align (TYPE_TARGET_TYPE (t));
910
911 case TYPE_CODE_STRUCT:
912 case TYPE_CODE_UNION:
913 align = 1;
914 for (n = 0; n < TYPE_NFIELDS (t); n++)
915 {
916 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
917 if (falign > align)
918 align = falign;
919 }
920 return align;
921 }
922 }
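
/* Editor's illustrative examples (not in the original source), assuming
   LP64 type sizes:

     struct { char c; double d; }   largest field 8  -> alignment 8
     int a[3]                       element type     -> alignment 4
     16-byte vector                                  -> alignment 16
     32-byte vector                 capped           -> alignment 16  */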
923
924 /* Return 1 if *TY is a homogeneous floating-point aggregate or
925 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
926 document; otherwise return 0. */
927
928 static int
929 is_hfa_or_hva (struct type *ty)
930 {
931 switch (TYPE_CODE (ty))
932 {
933 case TYPE_CODE_ARRAY:
934 {
935 struct type *target_ty = TYPE_TARGET_TYPE (ty);
936
937 if (TYPE_VECTOR (ty))
938 return 0;
939
940 if (TYPE_LENGTH (ty) <= 4 * TYPE_LENGTH (target_ty) /* At most 4 members. */
941 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
942 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
943 && TYPE_VECTOR (target_ty))))
944 return 1;
945 break;
946 }
947
948 case TYPE_CODE_UNION:
949 case TYPE_CODE_STRUCT:
950 {
951 /* HFA or HVA has at most four members. */
952 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
953 {
954 struct type *member0_type;
955
956 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
957 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
958 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
959 && TYPE_VECTOR (member0_type)))
960 {
961 int i;
962
963 for (i = 0; i < TYPE_NFIELDS (ty); i++)
964 {
965 struct type *member1_type;
966
967 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
968 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
969 || (TYPE_LENGTH (member0_type)
970 != TYPE_LENGTH (member1_type)))
971 return 0;
972 }
973 return 1;
974 }
975 }
976 return 0;
977 }
978
979 default:
980 break;
981 }
982
983 return 0;
984 }
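
/* Editor's illustrative examples (not in the original source), using
   the definition above:

     struct { float a, b; }            -> 1 (HFA: two floats)
     struct { float a; double b; }     -> 0 (mixed base types)
     struct { float a, b, c, d, e; }   -> 0 (more than four members)

   A struct of up to four identical short-vector members is an HVA.  */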
985
986 /* AArch64 function call information structure. */
987 struct aarch64_call_info
988 {
989 /* The current argument number. */
990 unsigned argnum;
991
992 /* The next general purpose register number, equivalent to NGRN as
993 described in the AArch64 Procedure Call Standard. */
994 unsigned ngrn;
995
996 /* The next SIMD and floating point register number, equivalent to
997 NSRN as described in the AArch64 Procedure Call Standard. */
998 unsigned nsrn;
999
1000 /* The next stacked argument address, equivalent to NSAA as
1001 described in the AArch64 Procedure Call Standard. */
1002 unsigned nsaa;
1003
1004 /* Stack item vector. */
1005 VEC(stack_item_t) *si;
1006 };
1007
1008 /* Pass a value in a sequence of consecutive X registers. The caller
1009 is responsible for ensuring sufficient registers are available. */
1010
1011 static void
1012 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1013 struct aarch64_call_info *info, struct type *type,
1014 struct value *arg)
1015 {
1016 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1017 int len = TYPE_LENGTH (type);
1018 enum type_code typecode = TYPE_CODE (type);
1019 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1020 const bfd_byte *buf = value_contents (arg);
1021
1022 info->argnum++;
1023
1024 while (len > 0)
1025 {
1026 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1027 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1028 byte_order);
1029
1031 /* Adjust sub-word struct/union args when big-endian. */
1032 if (byte_order == BFD_ENDIAN_BIG
1033 && partial_len < X_REGISTER_SIZE
1034 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1035 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1036
1037 if (aarch64_debug)
1038 {
1039 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1040 gdbarch_register_name (gdbarch, regnum),
1041 phex (regval, X_REGISTER_SIZE));
1042 }
1043 regcache_cooked_write_unsigned (regcache, regnum, regval);
1044 len -= partial_len;
1045 buf += partial_len;
1046 regnum++;
1047 }
1048 }
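
/* Editor's illustrative example (not in the original source): with
   NGRN == 0, a 16-byte struct is split by the loop above into x0
   (first eight bytes) and x1 (remaining bytes).  On a big-endian
   target, a final chunk shorter than eight bytes is shifted into the
   most significant bits of its register, per the adjustment above.  */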
1049
1050 /* Attempt to marshall a value in a V register. Return 1 if
1051 successful, or 0 if insufficient registers are available. This
1052 function, unlike the equivalent pass_in_x() function, does not
1053 handle arguments spread across multiple registers. */
1054
1055 static int
1056 pass_in_v (struct gdbarch *gdbarch,
1057 struct regcache *regcache,
1058 struct aarch64_call_info *info,
1059 int len, const bfd_byte *buf)
1060 {
1061 if (info->nsrn < 8)
1062 {
1063 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1064 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1065 gdb_byte reg[V_REGISTER_SIZE];
1066
1067 info->argnum++;
1068 info->nsrn++;
1069
1070 memset (reg, 0, sizeof (reg));
1071 /* PCS C.1, the argument is allocated to the least significant
1072 bits of the V register. */
1073 memcpy (reg, buf, len);
1074 regcache_cooked_write (regcache, regnum, reg);
1075
1076 if (aarch64_debug)
1077 {
1078 debug_printf ("arg %d in %s\n", info->argnum,
1079 gdbarch_register_name (gdbarch, regnum));
1080 }
1081 return 1;
1082 }
1083 info->nsrn = 8;
1084 return 0;
1085 }
1086
1087 /* Marshall an argument onto the stack. */
1088
1089 static void
1090 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1091 struct value *arg)
1092 {
1093 const bfd_byte *buf = value_contents (arg);
1094 int len = TYPE_LENGTH (type);
1095 int align;
1096 stack_item_t item;
1097
1098 info->argnum++;
1099
1100 align = aarch64_type_align (type);
1101
1102 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1103 natural alignment of the argument's type. */
1104 align = align_up (align, 8);
1105
1106 /* The AArch64 PCS requires at most doubleword alignment. */
1107 if (align > 16)
1108 align = 16;
1109
1110 if (aarch64_debug)
1111 {
1112 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1113 info->nsaa);
1114 }
1115
1116 item.len = len;
1117 item.data = buf;
1118 VEC_safe_push (stack_item_t, info->si, &item);
1119
1120 info->nsaa += len;
1121 if (info->nsaa & (align - 1))
1122 {
1123 /* Push stack alignment padding. */
1124 int pad = align - (info->nsaa & (align - 1));
1125
1126 item.len = pad;
1127 item.data = buf;
1128
1129 VEC_safe_push (stack_item_t, info->si, &item);
1130 info->nsaa += pad;
1131 }
1132 }
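
/* Editor's illustrative example (not in the original source): a
   12-byte struct { int a, b, c; } has natural alignment 4, bumped to
   the 8-byte minimum; starting from NSAA == 0 the code above records
   a 12-byte data item and then a 4-byte padding item, leaving NSAA at
   16.  */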
1133
1134 /* Marshall an argument into a sequence of one or more consecutive X
1135 registers or, if insufficient X registers are available then onto
1136 the stack. */
1137
1138 static void
1139 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1140 struct aarch64_call_info *info, struct type *type,
1141 struct value *arg)
1142 {
1143 int len = TYPE_LENGTH (type);
1144 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1145
1146 /* PCS C.13 - Pass in registers if we have enough spare. */
1147 if (info->ngrn + nregs <= 8)
1148 {
1149 pass_in_x (gdbarch, regcache, info, type, arg);
1150 info->ngrn += nregs;
1151 }
1152 else
1153 {
1154 info->ngrn = 8;
1155 pass_on_stack (info, type, arg);
1156 }
1157 }
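
/* Editor's illustrative example (not in the original source): with
   NGRN == 7, a two-register (16-byte) argument no longer fits, so the
   code above sets NGRN to 8 and places the whole argument on the
   stack; an argument is never split between registers and stack.  */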
1158
1159 /* Pass a value in a V register, or on the stack if insufficient V
1160 registers are available. */
1161
1162 static void
1163 pass_in_v_or_stack (struct gdbarch *gdbarch,
1164 struct regcache *regcache,
1165 struct aarch64_call_info *info,
1166 struct type *type,
1167 struct value *arg)
1168 {
1169 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1170 value_contents (arg)))
1171 pass_on_stack (info, type, arg);
1172 }
1173
1174 /* Implement the "push_dummy_call" gdbarch method. */
1175
1176 static CORE_ADDR
1177 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1178 struct regcache *regcache, CORE_ADDR bp_addr,
1179 int nargs,
1180 struct value **args, CORE_ADDR sp, int struct_return,
1181 CORE_ADDR struct_addr)
1182 {
1183 int nstack = 0;
1184 int argnum;
1185 int x_argreg;
1186 int v_argreg;
1187 struct aarch64_call_info info;
1188 struct type *func_type;
1189 struct type *return_type;
1190 int lang_struct_return;
1191
1192 memset (&info, 0, sizeof (info));
1193
1194 /* We need to know what the type of the called function is in order
1195 to determine the number of named/anonymous arguments for the
1196 actual argument placement, and the return type in order to handle
1197 return value correctly.
1198
1199 The generic code above us views the decision of return in memory
1200 or return in registers as a two-stage process. The language
1201 handler is consulted first and may decide to return in memory (e.g.
1202 a class with a copy constructor returned by value); this will cause
1203 the generic code to allocate space AND insert an initial leading
1204 argument.
1205
1206 If the language code does not decide to pass in memory then the
1207 target code is consulted.
1208
1209 If the language code decides to pass in memory we want to move
1210 the pointer inserted as the initial argument from the argument
1211 list and into X8, the conventional AArch64 struct return pointer
1212 register.
1213
1214 This is slightly awkward; ideally the flag "lang_struct_return"
1215 would be passed to the target's implementation of push_dummy_call.
1216 Rather than change the target interface we call the language code
1217 directly ourselves. */
1218
1219 func_type = check_typedef (value_type (function));
1220
1221 /* Dereference function pointer types. */
1222 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1223 func_type = TYPE_TARGET_TYPE (func_type);
1224
1225 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1226 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1227
1228 /* If language_pass_by_reference () returned true we will have been
1229 given an additional initial argument, a hidden pointer to the
1230 return slot in memory. */
1231 return_type = TYPE_TARGET_TYPE (func_type);
1232 lang_struct_return = language_pass_by_reference (return_type);
1233
1234 /* Set the return address. For the AArch64, the return breakpoint
1235 is always at BP_ADDR. */
1236 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1237
1238 /* If we were given an initial argument for the return slot because
1239 lang_struct_return was true, lose it. */
1240 if (lang_struct_return)
1241 {
1242 args++;
1243 nargs--;
1244 }
1245
1246 /* The struct_return pointer occupies X8. */
1247 if (struct_return || lang_struct_return)
1248 {
1249 if (aarch64_debug)
1250 {
1251 debug_printf ("struct return in %s = 0x%s\n",
1252 gdbarch_register_name (gdbarch,
1253 AARCH64_STRUCT_RETURN_REGNUM),
1254 paddress (gdbarch, struct_addr));
1255 }
1256 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1257 struct_addr);
1258 }
1259
1260 for (argnum = 0; argnum < nargs; argnum++)
1261 {
1262 struct value *arg = args[argnum];
1263 struct type *arg_type;
1264 int len;
1265
1266 arg_type = check_typedef (value_type (arg));
1267 len = TYPE_LENGTH (arg_type);
1268
1269 switch (TYPE_CODE (arg_type))
1270 {
1271 case TYPE_CODE_INT:
1272 case TYPE_CODE_BOOL:
1273 case TYPE_CODE_CHAR:
1274 case TYPE_CODE_RANGE:
1275 case TYPE_CODE_ENUM:
1276 if (len < 4)
1277 {
1278 /* Promote to 32 bit integer. */
1279 if (TYPE_UNSIGNED (arg_type))
1280 arg_type = builtin_type (gdbarch)->builtin_uint32;
1281 else
1282 arg_type = builtin_type (gdbarch)->builtin_int32;
1283 arg = value_cast (arg_type, arg);
1284 }
1285 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1286 break;
1287
1288 case TYPE_CODE_COMPLEX:
1289 if (info.nsrn <= 6)
1290 {
1291 const bfd_byte *buf = value_contents (arg);
1292 struct type *target_type =
1293 check_typedef (TYPE_TARGET_TYPE (arg_type));
1294
1295 pass_in_v (gdbarch, regcache, &info,
1296 TYPE_LENGTH (target_type), buf);
1297 pass_in_v (gdbarch, regcache, &info,
1298 TYPE_LENGTH (target_type),
1299 buf + TYPE_LENGTH (target_type));
1300 }
1301 else
1302 {
1303 info.nsrn = 8;
1304 pass_on_stack (&info, arg_type, arg);
1305 }
1306 break;
1307 case TYPE_CODE_FLT:
1308 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1309 break;
1310
1311 case TYPE_CODE_STRUCT:
1312 case TYPE_CODE_ARRAY:
1313 case TYPE_CODE_UNION:
1314 if (is_hfa_or_hva (arg_type))
1315 {
1316 int elements = TYPE_NFIELDS (arg_type);
1317
1318 /* Homogeneous aggregates are passed in V registers. */
1319 if (info.nsrn + elements <= 8)
1320 {
1321 int i;
1322
1323 for (i = 0; i < elements; i++)
1324 {
1325 /* We know that we have sufficient registers
1326 available, so this will never fall back
1327 to the stack. */
1328 struct value *field =
1329 value_primitive_field (arg, 0, i, arg_type);
1330 struct type *field_type =
1331 check_typedef (value_type (field));
1332
1333 pass_in_v_or_stack (gdbarch, regcache, &info,
1334 field_type, field);
1335 }
1336 }
1337 else
1338 {
1339 info.nsrn = 8;
1340 pass_on_stack (&info, arg_type, arg);
1341 }
1342 }
1343 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1344 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1345 {
1346 /* Short vector types are passed in V registers. */
1347 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1348 }
1349 else if (len > 16)
1350 {
1351 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1352 invisible reference. */
1353
1354 /* Allocate aligned storage. */
1355 sp = align_down (sp - len, 16);
1356
1357 /* Write the real data into the stack. */
1358 write_memory (sp, value_contents (arg), len);
1359
1360 /* Construct the indirection. */
1361 arg_type = lookup_pointer_type (arg_type);
1362 arg = value_from_pointer (arg_type, sp);
1363 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1364 }
1365 else
1366 /* PCS C.15 / C.18 multiple values pass. */
1367 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1368 break;
1369
1370 default:
1371 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1372 break;
1373 }
1374 }
1375
1376 /* Make sure stack retains 16 byte alignment. */
1377 if (info.nsaa & 15)
1378 sp -= 16 - (info.nsaa & 15);
1379
1380 while (!VEC_empty (stack_item_t, info.si))
1381 {
1382 stack_item_t *si = VEC_last (stack_item_t, info.si);
1383
1384 sp -= si->len;
1385 write_memory (sp, si->data, si->len);
1386 VEC_pop (stack_item_t, info.si);
1387 }
1388
1389 VEC_free (stack_item_t, info.si);
1390
1391 /* Finally, update the SP register. */
1392 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1393
1394 return sp;
1395 }
1396
1397 /* Implement the "frame_align" gdbarch method. */
1398
1399 static CORE_ADDR
1400 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1401 {
1402 /* Align the stack to sixteen bytes. */
1403 return sp & ~(CORE_ADDR) 15;
1404 }
1405
1406 /* Return the type for an AdvSIMD Q register. */
1407
1408 static struct type *
1409 aarch64_vnq_type (struct gdbarch *gdbarch)
1410 {
1411 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1412
1413 if (tdep->vnq_type == NULL)
1414 {
1415 struct type *t;
1416 struct type *elem;
1417
1418 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1419 TYPE_CODE_UNION);
1420
1421 elem = builtin_type (gdbarch)->builtin_uint128;
1422 append_composite_type_field (t, "u", elem);
1423
1424 elem = builtin_type (gdbarch)->builtin_int128;
1425 append_composite_type_field (t, "s", elem);
1426
1427 tdep->vnq_type = t;
1428 }
1429
1430 return tdep->vnq_type;
1431 }
1432
1433 /* Return the type for an AdvSIMD D register. */
1434
1435 static struct type *
1436 aarch64_vnd_type (struct gdbarch *gdbarch)
1437 {
1438 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1439
1440 if (tdep->vnd_type == NULL)
1441 {
1442 struct type *t;
1443 struct type *elem;
1444
1445 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1446 TYPE_CODE_UNION);
1447
1448 elem = builtin_type (gdbarch)->builtin_double;
1449 append_composite_type_field (t, "f", elem);
1450
1451 elem = builtin_type (gdbarch)->builtin_uint64;
1452 append_composite_type_field (t, "u", elem);
1453
1454 elem = builtin_type (gdbarch)->builtin_int64;
1455 append_composite_type_field (t, "s", elem);
1456
1457 tdep->vnd_type = t;
1458 }
1459
1460 return tdep->vnd_type;
1461 }
1462
1463 /* Return the type for an AdvSIMD S register. */
1464
1465 static struct type *
1466 aarch64_vns_type (struct gdbarch *gdbarch)
1467 {
1468 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1469
1470 if (tdep->vns_type == NULL)
1471 {
1472 struct type *t;
1473 struct type *elem;
1474
1475 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1476 TYPE_CODE_UNION);
1477
1478 elem = builtin_type (gdbarch)->builtin_float;
1479 append_composite_type_field (t, "f", elem);
1480
1481 elem = builtin_type (gdbarch)->builtin_uint32;
1482 append_composite_type_field (t, "u", elem);
1483
1484 elem = builtin_type (gdbarch)->builtin_int32;
1485 append_composite_type_field (t, "s", elem);
1486
1487 tdep->vns_type = t;
1488 }
1489
1490 return tdep->vns_type;
1491 }
1492
1493 /* Return the type for an AdvSIMD H register. */
1494
1495 static struct type *
1496 aarch64_vnh_type (struct gdbarch *gdbarch)
1497 {
1498 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1499
1500 if (tdep->vnh_type == NULL)
1501 {
1502 struct type *t;
1503 struct type *elem;
1504
1505 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1506 TYPE_CODE_UNION);
1507
1508 elem = builtin_type (gdbarch)->builtin_uint16;
1509 append_composite_type_field (t, "u", elem);
1510
1511 elem = builtin_type (gdbarch)->builtin_int16;
1512 append_composite_type_field (t, "s", elem);
1513
1514 tdep->vnh_type = t;
1515 }
1516
1517 return tdep->vnh_type;
1518 }
1519
1520 /* Return the type for an AdvSIMD B register. */
1521
1522 static struct type *
1523 aarch64_vnb_type (struct gdbarch *gdbarch)
1524 {
1525 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1526
1527 if (tdep->vnb_type == NULL)
1528 {
1529 struct type *t;
1530 struct type *elem;
1531
1532 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1533 TYPE_CODE_UNION);
1534
1535 elem = builtin_type (gdbarch)->builtin_uint8;
1536 append_composite_type_field (t, "u", elem);
1537
1538 elem = builtin_type (gdbarch)->builtin_int8;
1539 append_composite_type_field (t, "s", elem);
1540
1541 tdep->vnb_type = t;
1542 }
1543
1544 return tdep->vnb_type;
1545 }
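
/* Editor's note (illustrative session, not in the original source):
   the union types above let the user choose a view of a pseudo
   register, e.g.

     (gdb) print $s0
     $1 = {f = 1.5, u = 1069547520, s = 1069547520}
     (gdb) print $s0.f
     $2 = 1.5  */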
1546
1547 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1548
1549 static int
1550 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1551 {
1552 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1553 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1554
1555 if (reg == AARCH64_DWARF_SP)
1556 return AARCH64_SP_REGNUM;
1557
1558 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1559 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1560
1561 return -1;
1562 }
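
/* Editor's illustrative examples (not in the original source), using
   the AArch64 DWARF numbering (x0-x30 are 0-30, sp is 31, v0-v31 are
   64-95):

     aarch64_dwarf_reg_to_regnum (gdbarch, 3)  -> AARCH64_X0_REGNUM + 3
     aarch64_dwarf_reg_to_regnum (gdbarch, 31) -> AARCH64_SP_REGNUM
     aarch64_dwarf_reg_to_regnum (gdbarch, 66) -> AARCH64_V0_REGNUM + 2
     aarch64_dwarf_reg_to_regnum (gdbarch, 32) -> -1 (not mapped here)  */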
1563 \f
1564
1565 /* Implement the "print_insn" gdbarch method. */
1566
1567 static int
1568 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1569 {
1570 info->symbols = NULL;
1571 return print_insn_aarch64 (memaddr, info);
1572 }
1573
1574 /* AArch64 BRK software debug mode instruction.
1575 Note that AArch64 code is always little-endian.
1576 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1577 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1578
1579 /* Implement the "breakpoint_from_pc" gdbarch method. */
1580
1581 static const gdb_byte *
1582 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1583 int *lenptr)
1584 {
1585 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1586
1587 *lenptr = sizeof (aarch64_default_breakpoint);
1588 return aarch64_default_breakpoint;
1589 }
1590
1591 /* Extract from an array REGS containing the (raw) register state a
1592 function return value of type TYPE, and copy that, in virtual
1593 format, into VALBUF. */
1594
1595 static void
1596 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1597 gdb_byte *valbuf)
1598 {
1599 struct gdbarch *gdbarch = get_regcache_arch (regs);
1600 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1601
1602 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1603 {
1604 bfd_byte buf[V_REGISTER_SIZE];
1605 int len = TYPE_LENGTH (type);
1606
1607 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1608 memcpy (valbuf, buf, len);
1609 }
1610 else if (TYPE_CODE (type) == TYPE_CODE_INT
1611 || TYPE_CODE (type) == TYPE_CODE_CHAR
1612 || TYPE_CODE (type) == TYPE_CODE_BOOL
1613 || TYPE_CODE (type) == TYPE_CODE_PTR
1614 || TYPE_CODE (type) == TYPE_CODE_REF
1615 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1616 {
1617 /* If the type is a plain integer, then the access is
1618 straightforward. Otherwise we have to play around a bit
1619 more. */
1620 int len = TYPE_LENGTH (type);
1621 int regno = AARCH64_X0_REGNUM;
1622 ULONGEST tmp;
1623
1624 while (len > 0)
1625 {
1626 /* By using store_unsigned_integer we avoid having to do
1627 anything special for small big-endian values. */
1628 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1629 store_unsigned_integer (valbuf,
1630 (len > X_REGISTER_SIZE
1631 ? X_REGISTER_SIZE : len), byte_order, tmp);
1632 len -= X_REGISTER_SIZE;
1633 valbuf += X_REGISTER_SIZE;
1634 }
1635 }
1636 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1637 {
1638 int regno = AARCH64_V0_REGNUM;
1639 bfd_byte buf[V_REGISTER_SIZE];
1640 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1641 int len = TYPE_LENGTH (target_type);
1642
1643 regcache_cooked_read (regs, regno, buf);
1644 memcpy (valbuf, buf, len);
1645 valbuf += len;
1646 regcache_cooked_read (regs, regno + 1, buf);
1647 memcpy (valbuf, buf, len);
1648 valbuf += len;
1649 }
1650 else if (is_hfa_or_hva (type))
1651 {
1652 int elements = TYPE_NFIELDS (type);
1653 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1654 int len = TYPE_LENGTH (member_type);
1655 int i;
1656
1657 for (i = 0; i < elements; i++)
1658 {
1659 int regno = AARCH64_V0_REGNUM + i;
1660 bfd_byte buf[V_REGISTER_SIZE];
1661
1662 if (aarch64_debug)
1663 {
1664 debug_printf ("read HFA or HVA return value element %d from %s\n",
1665 i + 1,
1666 gdbarch_register_name (gdbarch, regno));
1667 }
1668 regcache_cooked_read (regs, regno, buf);
1669
1670 memcpy (valbuf, buf, len);
1671 valbuf += len;
1672 }
1673 }
1674 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1675 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1676 {
1677 /* Short vector is returned in V register. */
1678 gdb_byte buf[V_REGISTER_SIZE];
1679
1680 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1681 memcpy (valbuf, buf, TYPE_LENGTH (type));
1682 }
1683 else
1684 {
1685 /* For a structure or union the behaviour is as if the value had
1686 been stored to word-aligned memory and then loaded into
1687 registers with 64-bit load instruction(s). */
1688 int len = TYPE_LENGTH (type);
1689 int regno = AARCH64_X0_REGNUM;
1690 bfd_byte buf[X_REGISTER_SIZE];
1691
1692 while (len > 0)
1693 {
1694 regcache_cooked_read (regs, regno++, buf);
1695 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1696 len -= X_REGISTER_SIZE;
1697 valbuf += X_REGISTER_SIZE;
1698 }
1699 }
1700 }
1701
1703 /* Will a function return an aggregate type in memory or in a
1704 register? Return 0 if an aggregate type can be returned in a
1705 register, 1 if it must be returned in memory. */
1706
1707 static int
1708 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1709 {
1713 type = check_typedef (type);
1714
1715 if (is_hfa_or_hva (type))
1716 {
1717 /* v0-v7 are used to return values and one register is allocated
1718 for one member. However, HFA or HVA has at most four members. */
1719 return 0;
1720 }
1721
1722 if (TYPE_LENGTH (type) > 16)
1723 {
1724 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1725 invisible reference. */
1726
1727 return 1;
1728 }
1729
1730 return 0;
1731 }
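
/* Editor's illustrative examples (not in the original source):

     struct { double a, b, c, d; }  32 bytes but an HFA -> registers (v0-v3)
     struct { char buf[24]; }       24 bytes, not an HFA -> memory (via x8)
     struct { long a, b; }          16 bytes             -> registers (x0/x1)  */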
1732
1733 /* Write into appropriate registers a function return value of type
1734 TYPE, given in virtual format. */
1735
1736 static void
1737 aarch64_store_return_value (struct type *type, struct regcache *regs,
1738 const gdb_byte *valbuf)
1739 {
1740 struct gdbarch *gdbarch = get_regcache_arch (regs);
1741 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1742
1743 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1744 {
1745 bfd_byte buf[V_REGISTER_SIZE];
1746 int len = TYPE_LENGTH (type);
1747
1748 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1749 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1750 }
1751 else if (TYPE_CODE (type) == TYPE_CODE_INT
1752 || TYPE_CODE (type) == TYPE_CODE_CHAR
1753 || TYPE_CODE (type) == TYPE_CODE_BOOL
1754 || TYPE_CODE (type) == TYPE_CODE_PTR
1755 || TYPE_CODE (type) == TYPE_CODE_REF
1756 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1757 {
1758 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1759 {
1760 /* Values of one word or less are zero/sign-extended and
1761 returned in x0. */
1762 bfd_byte tmpbuf[X_REGISTER_SIZE];
1763 LONGEST val = unpack_long (type, valbuf);
1764
1765 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1766 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1767 }
1768 else
1769 {
1770 /* Integral values greater than one word are stored in
1771 consecutive registers starting with x0. This will always
1772 be a multiple of the register size. */
1773 int len = TYPE_LENGTH (type);
1774 int regno = AARCH64_X0_REGNUM;
1775
1776 while (len > 0)
1777 {
1778 regcache_cooked_write (regs, regno++, valbuf);
1779 len -= X_REGISTER_SIZE;
1780 valbuf += X_REGISTER_SIZE;
1781 }
1782 }
1783 }
1784 else if (is_hfa_or_hva (type))
1785 {
1786 int elements = TYPE_NFIELDS (type);
1787 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1788 int len = TYPE_LENGTH (member_type);
1789 int i;
1790
1791 for (i = 0; i < elements; i++)
1792 {
1793 int regno = AARCH64_V0_REGNUM + i;
1794 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1795
1796 if (aarch64_debug)
1797 {
1798 debug_printf ("write HFA or HVA return value element %d to %s\n",
1799 i + 1,
1800 gdbarch_register_name (gdbarch, regno));
1801 }
1802
1803 memcpy (tmpbuf, valbuf, len);
1804 regcache_cooked_write (regs, regno, tmpbuf);
1805 valbuf += len;
1806 }
1807 }
1808 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1809 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1810 {
1811 /* Short vector. */
1812 gdb_byte buf[V_REGISTER_SIZE];
1813
1814 memcpy (buf, valbuf, TYPE_LENGTH (type));
1815 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1816 }
1817 else
1818 {
1819 /* For a structure or union the behaviour is as if the value had
1820 been stored to word-aligned memory and then loaded into
1821 registers with 64-bit load instruction(s). */
1822 int len = TYPE_LENGTH (type);
1823 int regno = AARCH64_X0_REGNUM;
1824 bfd_byte tmpbuf[X_REGISTER_SIZE];
1825
1826 while (len > 0)
1827 {
1828 memcpy (tmpbuf, valbuf,
1829 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1830 regcache_cooked_write (regs, regno++, tmpbuf);
1831 len -= X_REGISTER_SIZE;
1832 valbuf += X_REGISTER_SIZE;
1833 }
1834 }
1835 }
1836
1837 /* Implement the "return_value" gdbarch method. */
1838
1839 static enum return_value_convention
1840 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1841 struct type *valtype, struct regcache *regcache,
1842 gdb_byte *readbuf, const gdb_byte *writebuf)
1843 {
1844 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1845
1846 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1847 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1848 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1849 {
1850 if (aarch64_return_in_memory (gdbarch, valtype))
1851 {
1852 if (aarch64_debug)
1853 debug_printf ("return value in memory\n");
1854 return RETURN_VALUE_STRUCT_CONVENTION;
1855 }
1856 }
1857
1858 if (writebuf)
1859 aarch64_store_return_value (valtype, regcache, writebuf);
1860
1861 if (readbuf)
1862 aarch64_extract_return_value (valtype, regcache, readbuf);
1863
1864 if (aarch64_debug)
1865 debug_printf ("return value in registers\n");
1866
1867 return RETURN_VALUE_REGISTER_CONVENTION;
1868 }
1869
1870 /* Implement the "get_longjmp_target" gdbarch method. */
1871
1872 static int
1873 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1874 {
1875 CORE_ADDR jb_addr;
1876 gdb_byte buf[X_REGISTER_SIZE];
1877 struct gdbarch *gdbarch = get_frame_arch (frame);
1878 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1879 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1880
1881 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1882
1883 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1884 X_REGISTER_SIZE))
1885 return 0;
1886
1887 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1888 return 1;
1889 }
1890
1891 /* Implement the "gen_return_address" gdbarch method. */
1892
1893 static void
1894 aarch64_gen_return_address (struct gdbarch *gdbarch,
1895 struct agent_expr *ax, struct axs_value *value,
1896 CORE_ADDR scope)
1897 {
1898 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1899 value->kind = axs_lvalue_register;
1900 value->u.reg = AARCH64_LR_REGNUM;
1901 }
1902 \f
1903
1904 /* Return the pseudo register name corresponding to register regnum. */
1905
1906 static const char *
1907 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1908 {
1909 static const char *const q_name[] =
1910 {
1911 "q0", "q1", "q2", "q3",
1912 "q4", "q5", "q6", "q7",
1913 "q8", "q9", "q10", "q11",
1914 "q12", "q13", "q14", "q15",
1915 "q16", "q17", "q18", "q19",
1916 "q20", "q21", "q22", "q23",
1917 "q24", "q25", "q26", "q27",
1918 "q28", "q29", "q30", "q31",
1919 };
1920
1921 static const char *const d_name[] =
1922 {
1923 "d0", "d1", "d2", "d3",
1924 "d4", "d5", "d6", "d7",
1925 "d8", "d9", "d10", "d11",
1926 "d12", "d13", "d14", "d15",
1927 "d16", "d17", "d18", "d19",
1928 "d20", "d21", "d22", "d23",
1929 "d24", "d25", "d26", "d27",
1930 "d28", "d29", "d30", "d31",
1931 };
1932
1933 static const char *const s_name[] =
1934 {
1935 "s0", "s1", "s2", "s3",
1936 "s4", "s5", "s6", "s7",
1937 "s8", "s9", "s10", "s11",
1938 "s12", "s13", "s14", "s15",
1939 "s16", "s17", "s18", "s19",
1940 "s20", "s21", "s22", "s23",
1941 "s24", "s25", "s26", "s27",
1942 "s28", "s29", "s30", "s31",
1943 };
1944
1945 static const char *const h_name[] =
1946 {
1947 "h0", "h1", "h2", "h3",
1948 "h4", "h5", "h6", "h7",
1949 "h8", "h9", "h10", "h11",
1950 "h12", "h13", "h14", "h15",
1951 "h16", "h17", "h18", "h19",
1952 "h20", "h21", "h22", "h23",
1953 "h24", "h25", "h26", "h27",
1954 "h28", "h29", "h30", "h31",
1955 };
1956
1957 static const char *const b_name[] =
1958 {
1959 "b0", "b1", "b2", "b3",
1960 "b4", "b5", "b6", "b7",
1961 "b8", "b9", "b10", "b11",
1962 "b12", "b13", "b14", "b15",
1963 "b16", "b17", "b18", "b19",
1964 "b20", "b21", "b22", "b23",
1965 "b24", "b25", "b26", "b27",
1966 "b28", "b29", "b30", "b31",
1967 };
1968
1969 regnum -= gdbarch_num_regs (gdbarch);
1970
1971 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1972 return q_name[regnum - AARCH64_Q0_REGNUM];
1973
1974 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1975 return d_name[regnum - AARCH64_D0_REGNUM];
1976
1977 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1978 return s_name[regnum - AARCH64_S0_REGNUM];
1979
1980 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1981 return h_name[regnum - AARCH64_H0_REGNUM];
1982
1983 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1984 return b_name[regnum - AARCH64_B0_REGNUM];
1985
1986 internal_error (__FILE__, __LINE__,
1987 _("aarch64_pseudo_register_name: bad register number %d"),
1988 regnum);
1989 }
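/* Worked mapping (editorial note): after subtracting gdbarch_num_regs,
   pseudo offsets 0-31 name "q0"-"q31", 32-63 name "d0"-"d31", 64-95
   name "s0"-"s31", 96-127 name "h0"-"h31" and 128-159 name "b0"-"b31",
   mirroring the pseudo register base numbers defined at the top of
   this file. */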
1990
1991 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
1992
1993 static struct type *
1994 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1995 {
1996 regnum -= gdbarch_num_regs (gdbarch);
1997
1998 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1999 return aarch64_vnq_type (gdbarch);
2000
2001 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2002 return aarch64_vnd_type (gdbarch);
2003
2004 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2005 return aarch64_vns_type (gdbarch);
2006
2007 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2008 return aarch64_vnh_type (gdbarch);
2009
2010 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2011 return aarch64_vnb_type (gdbarch);
2012
2013 internal_error (__FILE__, __LINE__,
2014 _("aarch64_pseudo_register_type: bad register number %d"),
2015 regnum);
2016 }
2017
2018 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2019
2020 static int
2021 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2022 struct reggroup *group)
2023 {
2024 regnum -= gdbarch_num_regs (gdbarch);
2025
2026 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2027 return group == all_reggroup || group == vector_reggroup;
2028 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2029 return (group == all_reggroup || group == vector_reggroup
2030 || group == float_reggroup);
2031 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2032 return (group == all_reggroup || group == vector_reggroup
2033 || group == float_reggroup);
2034 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2035 return group == all_reggroup || group == vector_reggroup;
2036 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2037 return group == all_reggroup || group == vector_reggroup;
2038
2039 return group == all_reggroup;
2040 }
2041
2042 /* Implement the "pseudo_register_read_value" gdbarch method. */
2043
2044 static struct value *
2045 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2046 struct regcache *regcache,
2047 int regnum)
2048 {
2049 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2050 struct value *result_value;
2051 gdb_byte *buf;
2052
2053 result_value = allocate_value (register_type (gdbarch, regnum));
2054 VALUE_LVAL (result_value) = lval_register;
2055 VALUE_REGNUM (result_value) = regnum;
2056 buf = value_contents_raw (result_value);
2057
2058 regnum -= gdbarch_num_regs (gdbarch);
2059
2060 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2061 {
2062 enum register_status status;
2063 unsigned v_regnum;
2064
2065 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2066 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2067 if (status != REG_VALID)
2068 mark_value_bytes_unavailable (result_value, 0,
2069 TYPE_LENGTH (value_type (result_value)));
2070 else
2071 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2072 return result_value;
2073 }
2074
2075 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2076 {
2077 enum register_status status;
2078 unsigned v_regnum;
2079
2080 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2081 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2082 if (status != REG_VALID)
2083 mark_value_bytes_unavailable (result_value, 0,
2084 TYPE_LENGTH (value_type (result_value)));
2085 else
2086 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2087 return result_value;
2088 }
2089
2090 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2091 {
2092 enum register_status status;
2093 unsigned v_regnum;
2094
2095 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2096 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2097 if (status != REG_VALID)
2098 mark_value_bytes_unavailable (result_value, 0,
2099 TYPE_LENGTH (value_type (result_value)));
2100 else
2101 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2102 return result_value;
2103 }
2104
2105 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2106 {
2107 enum register_status status;
2108 unsigned v_regnum;
2109
2110 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2111 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2112 if (status != REG_VALID)
2113 mark_value_bytes_unavailable (result_value, 0,
2114 TYPE_LENGTH (value_type (result_value)));
2115 else
2116 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2117 return result_value;
2118 }
2119
2120 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2121 {
2122 enum register_status status;
2123 unsigned v_regnum;
2124
2125 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2126 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2127 if (status != REG_VALID)
2128 mark_value_bytes_unavailable (result_value, 0,
2129 TYPE_LENGTH (value_type (result_value)));
2130 else
2131 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2132 return result_value;
2133 }
2134
2135 gdb_assert_not_reached ("regnum out of bounds");
2136 }
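/* For example (editorial sketch): reading pseudo register "d3" reads
   raw register V3 and copies its low D_REGISTER_SIZE (8) bytes into
   the result value; if the raw read does not return REG_VALID, the
   whole value is marked unavailable rather than returning stale
   bytes. */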
2137
2138 /* Implement the "pseudo_register_write" gdbarch method. */
2139
2140 static void
2141 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2142 int regnum, const gdb_byte *buf)
2143 {
2144 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2145
2146 /* Ensure the register buffer is zero. We want GDB writes of the
2147 various 'scalar' pseudo registers to behave like architectural
2148 writes: register-width bytes are written and the remainder is set
2149 to zero. */
2150 memset (reg_buf, 0, sizeof (reg_buf));
2151
2152 regnum -= gdbarch_num_regs (gdbarch);
2153
2154 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2155 {
2156 /* pseudo Q registers */
2157 unsigned v_regnum;
2158
2159 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2160 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2161 regcache_raw_write (regcache, v_regnum, reg_buf);
2162 return;
2163 }
2164
2165 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2166 {
2167 /* pseudo D registers */
2168 unsigned v_regnum;
2169
2170 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2171 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2172 regcache_raw_write (regcache, v_regnum, reg_buf);
2173 return;
2174 }
2175
2176 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2177 {
2178 unsigned v_regnum;
2179
2180 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2181 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2182 regcache_raw_write (regcache, v_regnum, reg_buf);
2183 return;
2184 }
2185
2186 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2187 {
2188 /* pseudo H registers */
2189 unsigned v_regnum;
2190
2191 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2192 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2193 regcache_raw_write (regcache, v_regnum, reg_buf);
2194 return;
2195 }
2196
2197 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2198 {
2199 /* pseudo B registers */
2200 unsigned v_regnum;
2201
2202 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2203 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2204 regcache_raw_write (regcache, v_regnum, reg_buf);
2205 return;
2206 }
2207
2208 gdb_assert_not_reached ("regnum out of bounds");
2209 }
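/* For example (editorial sketch): writing 0xff to pseudo register
   "b5" copies a single B_REGISTER_SIZE byte into the zeroed staging
   buffer and then writes the buffer to raw register V5, so the upper
   15 bytes of V5 are cleared, matching an architectural write to the
   scalar B5 view of the vector register. */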
2210
2211 /* Callback function for user_reg_add. */
2212
2213 static struct value *
2214 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2215 {
2216 const int *reg_p = (const int *) baton;
2217
2218 return value_of_register (*reg_p, frame);
2219 }
2220 \f
2221
2222 /* Implement the "software_single_step" gdbarch method, needed to
2223 single step through atomic sequences on AArch64. */
2224
2225 static int
2226 aarch64_software_single_step (struct frame_info *frame)
2227 {
2228 struct gdbarch *gdbarch = get_frame_arch (frame);
2229 struct address_space *aspace = get_frame_address_space (frame);
2230 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2231 const int insn_size = 4;
2232 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2233 CORE_ADDR pc = get_frame_pc (frame);
2234 CORE_ADDR breaks[2] = { -1, -1 };
2235 CORE_ADDR loc = pc;
2236 CORE_ADDR closing_insn = 0;
2237 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2238 byte_order_for_code);
2239 int index;
2240 int insn_count;
2241 int bc_insn_count = 0; /* Conditional branch instruction count. */
2242 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2243 aarch64_inst inst;
2244
2245 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2246 return 0;
2247
2248 /* Look for a Load Exclusive instruction which begins the sequence. */
2249 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2250 return 0;
2251
2252 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2253 {
2254 loc += insn_size;
2255 insn = read_memory_unsigned_integer (loc, insn_size,
2256 byte_order_for_code);
2257
2258 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2259 return 0;
2260 /* Check if the instruction is a conditional branch. */
2261 if (inst.opcode->iclass == condbranch)
2262 {
2263 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2264
2265 if (bc_insn_count >= 1)
2266 return 0;
2267
2268 /* It is, so we'll try to set a breakpoint at the destination. */
2269 breaks[1] = loc + inst.operands[0].imm.value;
2270
2271 bc_insn_count++;
2272 last_breakpoint++;
2273 }
2274
2275 /* Look for the Store Exclusive which closes the atomic sequence. */
2276 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2277 {
2278 closing_insn = loc;
2279 break;
2280 }
2281 }
2282
2283 /* We didn't find a closing Store Exclusive instruction; fall back. */
2284 if (!closing_insn)
2285 return 0;
2286
2287 /* Insert breakpoint after the end of the atomic sequence. */
2288 breaks[0] = loc + insn_size;
2289
2290 /* Check for duplicated breakpoints, and also check that the second
2291 breakpoint is not within the atomic sequence. */
2292 if (last_breakpoint
2293 && (breaks[1] == breaks[0]
2294 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2295 last_breakpoint = 0;
2296
2297 /* Insert the breakpoint at the end of the sequence, and one at the
2298 destination of the conditional branch, if it exists. */
2299 for (index = 0; index <= last_breakpoint; index++)
2300 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2301
2302 return 1;
2303 }
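/* Illustrative sequence (editorial, assumed code layout):

     loop: ldaxr w1, [x0]
           add w1, w1, #1
           stlxr w2, w1, [x0]
           cbnz w2, loop

   The LDAXR matches the ldstexcl test with bit 22 set, the STLXR
   closes the sequence, and a single-step breakpoint is placed on the
   instruction after the STLXR. CBNZ is not condbranch-class, so no
   second breakpoint is needed for this particular sequence. */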
2304
2305 struct displaced_step_closure
2306 {
2307 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2308 is being displaced stepped. */
2309 int cond;
2310
2311 /* PC adjustment offset after displaced stepping. */
2312 int32_t pc_adjust;
2313 };
2314
2315 /* Data when visiting instructions for displaced stepping. */
2316
2317 struct aarch64_displaced_step_data
2318 {
2319 struct aarch64_insn_data base;
2320
2321 /* The address at which the instruction will be executed. */
2322 CORE_ADDR new_addr;
2323 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2324 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2325 /* Number of instructions in INSN_BUF. */
2326 unsigned insn_count;
2327 /* Registers when doing displaced stepping. */
2328 struct regcache *regs;
2329
2330 struct displaced_step_closure *dsc;
2331 };
2332
2333 /* Implementation of aarch64_insn_visitor method "b". */
2334
2335 static void
2336 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2337 struct aarch64_insn_data *data)
2338 {
2339 struct aarch64_displaced_step_data *dsd
2340 = (struct aarch64_displaced_step_data *) data;
2341 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2342
2343 if (can_encode_int32 (new_offset, 28))
2344 {
2345 /* Emit B rather than BL, because executing BL on a new address
2346 will get the wrong address into LR. In order to avoid this,
2347 we emit B, and update LR if the instruction is BL. */
2348 emit_b (dsd->insn_buf, 0, new_offset);
2349 dsd->insn_count++;
2350 }
2351 else
2352 {
2353 /* Write NOP. */
2354 emit_nop (dsd->insn_buf);
2355 dsd->insn_count++;
2356 dsd->dsc->pc_adjust = offset;
2357 }
2358
2359 if (is_bl)
2360 {
2361 /* Update LR. */
2362 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2363 data->insn_addr + 4);
2364 }
2365 }
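/* Example (editorial): a displaced BL whose adjusted offset still
   fits in 28 bits is rewritten as a plain B to the same destination,
   with GDB itself setting LR to the address after the original BL;
   if the offset no longer fits, a NOP is executed instead and
   pc_adjust later redirects the PC to the real target. */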
2366
2367 /* Implementation of aarch64_insn_visitor method "b_cond". */
2368
2369 static void
2370 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2371 struct aarch64_insn_data *data)
2372 {
2373 struct aarch64_displaced_step_data *dsd
2374 = (struct aarch64_displaced_step_data *) data;
2375 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2376
2377 /* GDB has to fix up the PC after displaced stepping this instruction
2378 differently according to whether the condition is true or false.
2379 Instead of checking COND against the condition flags, we can use
2380 the following instructions, and GDB can tell how to fix up the PC
2381 from the resulting PC value.
2382
2383 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2384 INSN1 ;
2385 TAKEN:
2386 INSN2
2387 */
2388
2389 emit_bcond (dsd->insn_buf, cond, 8);
2390 dsd->dsc->cond = 1;
2391 dsd->dsc->pc_adjust = offset;
2392 dsd->insn_count = 1;
2393 }
2394
2395 /* Dynamically construct a register operand. If the register is known
2396 statically, it should be defined as a global constant instead of
2397 going through this helper function. */
2398
2399 static struct aarch64_register
2400 aarch64_register (unsigned num, int is64)
2401 {
2402 return (struct aarch64_register) { num, is64 };
2403 }
2404
2405 /* Implementation of aarch64_insn_visitor method "cb". */
2406
2407 static void
2408 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2409 const unsigned rn, int is64,
2410 struct aarch64_insn_data *data)
2411 {
2412 struct aarch64_displaced_step_data *dsd
2413 = (struct aarch64_displaced_step_data *) data;
2414 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2415
2416 /* The offset is out of range for a compare and branch
2417 instruction. We can use the following instructions instead:
2418
2419 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2420 INSN1 ;
2421 TAKEN:
2422 INSN2
2423 */
2424 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2425 dsd->insn_count = 1;
2426 dsd->dsc->cond = 1;
2427 dsd->dsc->pc_adjust = offset;
2428 }
2429
2430 /* Implementation of aarch64_insn_visitor method "tb". */
2431
2432 static void
2433 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2434 const unsigned rt, unsigned bit,
2435 struct aarch64_insn_data *data)
2436 {
2437 struct aarch64_displaced_step_data *dsd
2438 = (struct aarch64_displaced_step_data *) data;
2439 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2440
2441 /* The offset is out of range for a test bit and branch
2442 instruction. We can use the following instructions instead:
2443
2444 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2445 INSN1 ;
2446 TAKEN:
2447 INSN2
2448
2449 */
2450 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2451 dsd->insn_count = 1;
2452 dsd->dsc->cond = 1;
2453 dsd->dsc->pc_adjust = offset;
2454 }
2455
2456 /* Implementation of aarch64_insn_visitor method "adr". */
2457
2458 static void
2459 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2460 const int is_adrp, struct aarch64_insn_data *data)
2461 {
2462 struct aarch64_displaced_step_data *dsd
2463 = (struct aarch64_displaced_step_data *) data;
2464 /* We know exactly the address the ADR{P,} instruction will compute.
2465 We can just write it to the destination register. */
2466 CORE_ADDR address = data->insn_addr + offset;
2467
2468 if (is_adrp)
2469 {
2470 /* Clear the lower 12 bits of the offset to get the 4K page. */
2471 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2472 address & ~0xfff);
2473 }
2474 else
2475 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2476 address);
2477
2478 dsd->dsc->pc_adjust = 4;
2479 emit_nop (dsd->insn_buf);
2480 dsd->insn_count = 1;
2481 }
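/* Example (editorial): for an ADRP at INSN_ADDR the visitor's OFFSET
   already encodes the PC-relative displacement, so GDB stores
   (INSN_ADDR + OFFSET) & ~0xfff straight into the destination X
   register, runs a NOP in the scratch pad, and advances the PC by 4
   during fixup. */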
2482
2483 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2484
2485 static void
2486 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2487 const unsigned rt, const int is64,
2488 struct aarch64_insn_data *data)
2489 {
2490 struct aarch64_displaced_step_data *dsd
2491 = (struct aarch64_displaced_step_data *) data;
2492 CORE_ADDR address = data->insn_addr + offset;
2493 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2494
2495 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2496 address);
2497
2498 if (is_sw)
2499 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2500 aarch64_register (rt, 1), zero);
2501 else
2502 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2503 aarch64_register (rt, 1), zero);
2504
2505 dsd->dsc->pc_adjust = 4;
2506 }
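/* Sketch (editorial): a PC-relative "ldr x2, <label>" is relocated by
   first writing the label's address (INSN_ADDR + OFFSET) into X2 and
   then executing "ldr x2, [x2]" in the scratch pad, so the load no
   longer depends on where it executes. */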
2507
2508 /* Implementation of aarch64_insn_visitor method "others". */
2509
2510 static void
2511 aarch64_displaced_step_others (const uint32_t insn,
2512 struct aarch64_insn_data *data)
2513 {
2514 struct aarch64_displaced_step_data *dsd
2515 = (struct aarch64_displaced_step_data *) data;
2516
2517 aarch64_emit_insn (dsd->insn_buf, insn);
2518 dsd->insn_count = 1;
2519
2520 if ((insn & 0xfffffc1f) == 0xd65f0000)
2521 {
2522 /* RET */
2523 dsd->dsc->pc_adjust = 0;
2524 }
2525 else
2526 dsd->dsc->pc_adjust = 4;
2527 }
2528
2529 static const struct aarch64_insn_visitor visitor =
2530 {
2531 aarch64_displaced_step_b,
2532 aarch64_displaced_step_b_cond,
2533 aarch64_displaced_step_cb,
2534 aarch64_displaced_step_tb,
2535 aarch64_displaced_step_adr,
2536 aarch64_displaced_step_ldr_literal,
2537 aarch64_displaced_step_others,
2538 };
2539
2540 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2541
2542 struct displaced_step_closure *
2543 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2544 CORE_ADDR from, CORE_ADDR to,
2545 struct regcache *regs)
2546 {
2547 struct displaced_step_closure *dsc = NULL;
2548 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2549 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2550 struct aarch64_displaced_step_data dsd;
2551 aarch64_inst inst;
2552
2553 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2554 return NULL;
2555
2556 /* Look for a Load Exclusive instruction which begins the sequence. */
2557 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2558 {
2559 /* We can't displaced-step atomic sequences. */
2560 return NULL;
2561 }
2562
2563 dsc = XCNEW (struct displaced_step_closure);
2564 dsd.base.insn_addr = from;
2565 dsd.new_addr = to;
2566 dsd.regs = regs;
2567 dsd.dsc = dsc;
2568 dsd.insn_count = 0;
2569 aarch64_relocate_instruction (insn, &visitor,
2570 (struct aarch64_insn_data *) &dsd);
2571 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2572
2573 if (dsd.insn_count != 0)
2574 {
2575 int i;
2576
2577 /* The instruction can be relocated to the scratch pad. Copy the
2578 relocated instruction(s) there. */
2579 for (i = 0; i < dsd.insn_count; i++)
2580 {
2581 if (debug_displaced)
2582 {
2583 debug_printf ("displaced: writing insn ");
2584 debug_printf ("%.8x", dsd.insn_buf[i]);
2585 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2586 }
2587 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2588 (ULONGEST) dsd.insn_buf[i]);
2589 }
2590 }
2591 else
2592 {
2593 xfree (dsc);
2594 dsc = NULL;
2595 }
2596
2597 return dsc;
2598 }
2599
2600 /* Implement the "displaced_step_fixup" gdbarch method. */
2601
2602 void
2603 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2604 struct displaced_step_closure *dsc,
2605 CORE_ADDR from, CORE_ADDR to,
2606 struct regcache *regs)
2607 {
2608 if (dsc->cond)
2609 {
2610 ULONGEST pc;
2611
2612 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2613 if (pc - to == 8)
2614 {
2615 /* Condition is true. */
2616 }
2617 else if (pc - to == 4)
2618 {
2619 /* Condition is false. */
2620 dsc->pc_adjust = 4;
2621 }
2622 else
2623 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2624 }
2625
2626 if (dsc->pc_adjust != 0)
2627 {
2628 if (debug_displaced)
2629 {
2630 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2631 paddress (gdbarch, from), dsc->pc_adjust);
2632 }
2633 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2634 from + dsc->pc_adjust);
2635 }
2636 }
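/* Example (editorial): for a displaced B.EQ copied to scratch address
   TO, the emitted "b.eq +8" lands the PC at TO + 8 when the condition
   holds and at TO + 4 otherwise; the code above decodes which case
   occurred purely from PC - TO and then rewrites the PC relative to
   the original address FROM. */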
2637
2638 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2639
2640 int
2641 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2642 struct displaced_step_closure *closure)
2643 {
2644 return 1;
2645 }
2646
2647 /* Initialize the current architecture based on INFO. If possible,
2648 re-use an architecture from ARCHES, which is a list of
2649 architectures already created during this debugging session.
2650
2651 Called e.g. at program startup, when reading a core file, and when
2652 reading a binary file. */
2653
2654 static struct gdbarch *
2655 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2656 {
2657 struct gdbarch_tdep *tdep;
2658 struct gdbarch *gdbarch;
2659 struct gdbarch_list *best_arch;
2660 struct tdesc_arch_data *tdesc_data = NULL;
2661 const struct target_desc *tdesc = info.target_desc;
2662 int i;
2663 int have_fpa_registers = 1;
2664 int valid_p = 1;
2665 const struct tdesc_feature *feature;
2666 int num_regs = 0;
2667 int num_pseudo_regs = 0;
2668
2669 /* Ensure we always have a target descriptor. */
2670 if (!tdesc_has_registers (tdesc))
2671 tdesc = tdesc_aarch64;
2672
2673 gdb_assert (tdesc);
2674
2675 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2676
2677 if (feature == NULL)
2678 return NULL;
2679
2680 tdesc_data = tdesc_data_alloc ();
2681
2682 /* Validate the descriptor provides the mandatory core R registers
2683 and allocate their numbers. */
2684 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2685 valid_p &=
2686 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2687 aarch64_r_register_names[i]);
2688
2689 num_regs = AARCH64_X0_REGNUM + i;
2690
2691 /* Look for the V registers. */
2692 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2693 if (feature)
2694 {
2695 /* Validate the descriptor provides the mandatory V registers
2696 and allocate their numbers. */
2697 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2698 valid_p &=
2699 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2700 aarch64_v_register_names[i]);
2701
2702 num_regs = AARCH64_V0_REGNUM + i;
2703
2704 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
2705 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
2706 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
2707 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
2708 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
2709 }
2710
2711 if (!valid_p)
2712 {
2713 tdesc_data_cleanup (tdesc_data);
2714 return NULL;
2715 }
2716
2717 /* AArch64 code is always little-endian. */
2718 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2719
2720 /* If there is already a candidate, use it. */
2721 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2722 best_arch != NULL;
2723 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2724 {
2725 /* Found a match. */
2726 break;
2727 }
2728
2729 if (best_arch != NULL)
2730 {
2731 if (tdesc_data != NULL)
2732 tdesc_data_cleanup (tdesc_data);
2733 return best_arch->gdbarch;
2734 }
2735
2736 tdep = XCNEW (struct gdbarch_tdep);
2737 gdbarch = gdbarch_alloc (&info, tdep);
2738
2739 /* This should be low enough for everything. */
2740 tdep->lowest_pc = 0x20;
2741 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2742 tdep->jb_elt_size = 8;
2743
2744 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2745 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2746
2747 /* Frame handling. */
2748 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2749 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2750 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2751
2752 /* Advance PC across function entry code. */
2753 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2754
2755 /* The stack grows downward. */
2756 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2757
2758 /* Breakpoint manipulation. */
2759 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2760 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2761 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2762
2763 /* Information about registers, etc. */
2764 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2765 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2766 set_gdbarch_num_regs (gdbarch, num_regs);
2767
2768 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2769 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2770 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2771 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2772 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2773 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2774 aarch64_pseudo_register_reggroup_p);
2775
2776 /* ABI */
2777 set_gdbarch_short_bit (gdbarch, 16);
2778 set_gdbarch_int_bit (gdbarch, 32);
2779 set_gdbarch_float_bit (gdbarch, 32);
2780 set_gdbarch_double_bit (gdbarch, 64);
2781 set_gdbarch_long_double_bit (gdbarch, 128);
2782 set_gdbarch_long_bit (gdbarch, 64);
2783 set_gdbarch_long_long_bit (gdbarch, 64);
2784 set_gdbarch_ptr_bit (gdbarch, 64);
2785 set_gdbarch_char_signed (gdbarch, 0);
2786 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2787 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2788 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2789
2790 /* Internal <-> external register number maps. */
2791 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2792
2793 /* Returning results. */
2794 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2795
2796 /* Disassembly. */
2797 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2798
2799 /* Virtual tables. */
2800 set_gdbarch_vbit_in_delta (gdbarch, 1);
2801
2802 /* Hook in the ABI-specific overrides, if they have been registered. */
2803 info.target_desc = tdesc;
2804 info.tdep_info = (void *) tdesc_data;
2805 gdbarch_init_osabi (info, gdbarch);
2806
2807 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2808
2809 /* Add some default predicates. */
2810 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2811 dwarf2_append_unwinders (gdbarch);
2812 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2813
2814 frame_base_set_default (gdbarch, &aarch64_normal_base);
2815
2816 /* Now that we have tuned the configuration, set a few final things
2817 based on what the OS ABI has told us. */
2818
2819 if (tdep->jb_pc >= 0)
2820 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2821
2822 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2823
2824 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2825
2826 /* Add standard register aliases. */
2827 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2828 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2829 value_of_aarch64_user_reg,
2830 &aarch64_register_aliases[i].regnum);
2831
2832 return gdbarch;
2833 }
2834
2835 static void
2836 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2837 {
2838 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2839
2840 if (tdep == NULL)
2841 return;
2842
2843 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2844 paddress (gdbarch, tdep->lowest_pc));
2845 }
2846
2847 /* Suppress warning from -Wmissing-prototypes. */
2848 extern initialize_file_ftype _initialize_aarch64_tdep;
2849
2850 void
2851 _initialize_aarch64_tdep (void)
2852 {
2853 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2854 aarch64_dump_tdep);
2855
2856 initialize_tdesc_aarch64 ();
2857
2858 /* Debug this file's internals. */
2859 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2860 Set AArch64 debugging."), _("\
2861 Show AArch64 debugging."), _("\
2862 When on, AArch64 specific debugging is enabled."),
2863 NULL,
2864 show_aarch64_debug,
2865 &setdebuglist, &showdebuglist);
2866 }
2867
2868 /* AArch64 process record-replay related structures, defines etc. */
2869
2870 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2871 do \
2872 { \
2873 unsigned int reg_len = LENGTH; \
2874 if (reg_len) \
2875 { \
2876 REGS = XNEWVEC (uint32_t, reg_len); \
2877 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
2878 } \
2879 } \
2880 while (0)
2881
2882 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2883 do \
2884 { \
2885 unsigned int mem_len = LENGTH; \
2886 if (mem_len) \
2887 { \
2888 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2889 memcpy (&MEMS->len, &RECORD_BUF[0], \
2890 sizeof (struct aarch64_mem_r) * LENGTH); \
2891 } \
2892 } \
2893 while (0)
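/* Typical use (editorial sketch): a record handler fills a local
   record_buf[] with register numbers, sets reg_rec_count, and calls

     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   which heap-allocates the final array only when the count is
   non-zero. */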
2894
2895 /* AArch64 record/replay structures and enumerations. */
2896
2897 struct aarch64_mem_r
2898 {
2899 uint64_t len; /* Record length. */
2900 uint64_t addr; /* Memory address. */
2901 };
2902
2903 enum aarch64_record_result
2904 {
2905 AARCH64_RECORD_SUCCESS,
2906 AARCH64_RECORD_FAILURE,
2907 AARCH64_RECORD_UNSUPPORTED,
2908 AARCH64_RECORD_UNKNOWN
2909 };
2910
2911 typedef struct insn_decode_record_t
2912 {
2913 struct gdbarch *gdbarch;
2914 struct regcache *regcache;
2915 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2916 uint32_t aarch64_insn; /* Insn to be recorded. */
2917 uint32_t mem_rec_count; /* Count of memory records. */
2918 uint32_t reg_rec_count; /* Count of register records. */
2919 uint32_t *aarch64_regs; /* Registers to be recorded. */
2920 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2921 } insn_decode_record;
2922
2923 /* Record handler for data processing - register instructions. */
2924
2925 static unsigned int
2926 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2927 {
2928 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2929 uint32_t record_buf[4];
2930
2931 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2932 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2933 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2934
2935 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2936 {
2937 uint8_t setflags;
2938
2939 /* Logical (shifted register). */
2940 if (insn_bits24_27 == 0x0a)
2941 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2942 /* Add/subtract. */
2943 else if (insn_bits24_27 == 0x0b)
2944 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2945 else
2946 return AARCH64_RECORD_UNKNOWN;
2947
2948 record_buf[0] = reg_rd;
2949 aarch64_insn_r->reg_rec_count = 1;
2950 if (setflags)
2951 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2952 }
2953 else
2954 {
2955 if (insn_bits24_27 == 0x0b)
2956 {
2957 /* Data-processing (3 source). */
2958 record_buf[0] = reg_rd;
2959 aarch64_insn_r->reg_rec_count = 1;
2960 }
2961 else if (insn_bits24_27 == 0x0a)
2962 {
2963 if (insn_bits21_23 == 0x00)
2964 {
2965 /* Add/subtract (with carry). */
2966 record_buf[0] = reg_rd;
2967 aarch64_insn_r->reg_rec_count = 1;
2968 if (bit (aarch64_insn_r->aarch64_insn, 29))
2969 {
2970 record_buf[1] = AARCH64_CPSR_REGNUM;
2971 aarch64_insn_r->reg_rec_count = 2;
2972 }
2973 }
2974 else if (insn_bits21_23 == 0x02)
2975 {
2976 /* Conditional compare (register) and conditional compare
2977 (immediate) instructions. */
2978 record_buf[0] = AARCH64_CPSR_REGNUM;
2979 aarch64_insn_r->reg_rec_count = 1;
2980 }
2981 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2982 {
2983 /* Conditional select. */
2984 /* Data-processing (2 source). */
2985 /* Data-processing (1 source). */
2986 record_buf[0] = reg_rd;
2987 aarch64_insn_r->reg_rec_count = 1;
2988 }
2989 else
2990 return AARCH64_RECORD_UNKNOWN;
2991 }
2992 }
2993
2994 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2995 record_buf);
2996 return AARCH64_RECORD_SUCCESS;
2997 }
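/* Example (editorial, assumed encoding): "adds x0, x1, x2" is an
   add/subtract (shifted register) instruction with the S bit (29)
   set, so the handler records both X0 and CPSR; the non-flag-setting
   "add x0, x1, x2" records X0 alone. */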
2998
2999 /* Record handler for data processing - immediate instructions. */
3000
3001 static unsigned int
3002 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3003 {
3004 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3005 uint32_t record_buf[4];
3006
3007 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3008 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3009 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3010 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3011
3012 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3013 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3014 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3015 {
3016 record_buf[0] = reg_rd;
3017 aarch64_insn_r->reg_rec_count = 1;
3018 }
3019 else if (insn_bits24_27 == 0x01)
3020 {
3021 /* Add/Subtract (immediate). */
3022 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3023 record_buf[0] = reg_rd;
3024 aarch64_insn_r->reg_rec_count = 1;
3025 if (setflags)
3026 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3027 }
3028 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3029 {
3030 /* Logical (immediate). */
3031 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3032 record_buf[0] = reg_rd;
3033 aarch64_insn_r->reg_rec_count = 1;
3034 if (setflags)
3035 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3036 }
3037 else
3038 return AARCH64_RECORD_UNKNOWN;
3039
3040 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3041 record_buf);
3042 return AARCH64_RECORD_SUCCESS;
3043 }
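/* Example (editorial, assumed encoding): "subs x1, x2, #4" takes the
   add/subtract (immediate) path with the S bit set and records X1
   plus CPSR, while "movz x1, #4" matches the move wide (immediate)
   case and records only X1. */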
3044
3045 /* Record handler for branch, exception generation and system instructions. */
3046
3047 static unsigned int
3048 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3049 {
3050 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3051 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3052 uint32_t record_buf[4];
3053
3054 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3055 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3056 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3057
3058 if (insn_bits28_31 == 0x0d)
3059 {
3060 /* Exception generation instructions. */
3061 if (insn_bits24_27 == 0x04)
3062 {
3063 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3064 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3065 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3066 {
3067 ULONGEST svc_number;
3068
3069 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3070 &svc_number);
3071 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3072 svc_number);
3073 }
3074 else
3075 return AARCH64_RECORD_UNSUPPORTED;
3076 }
3077 /* System instructions. */
3078 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3079 {
3080 uint32_t reg_rt, reg_crn;
3081
3082 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3083 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3084
3085 /* Record rt in case of sysl and mrs instructions. */
3086 if (bit (aarch64_insn_r->aarch64_insn, 21))
3087 {
3088 record_buf[0] = reg_rt;
3089 aarch64_insn_r->reg_rec_count = 1;
3090 }
3091 /* Record cpsr for hint and msr(immediate) instructions. */
3092 else if (reg_crn == 0x02 || reg_crn == 0x04)
3093 {
3094 record_buf[0] = AARCH64_CPSR_REGNUM;
3095 aarch64_insn_r->reg_rec_count = 1;
3096 }
3097 }
3098 /* Unconditional branch (register). */
3099 else if ((insn_bits24_27 & 0x0e) == 0x06)
3100 {
3101 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3102 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3103 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3104 }
3105 else
3106 return AARCH64_RECORD_UNKNOWN;
3107 }
3108 /* Unconditional branch (immediate). */
3109 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3110 {
3111 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3112 if (bit (aarch64_insn_r->aarch64_insn, 31))
3113 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3114 }
3115 else
3116 /* Compare & branch (immediate), Test & branch (immediate) and
3117 Conditional branch (immediate). */
3118 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3119
3120 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3121 record_buf);
3122 return AARCH64_RECORD_SUCCESS;
3123 }
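/* Examples (editorial): "bl func" is an unconditional branch
   (immediate) with bit 31 set, so both PC and LR are recorded; a
   plain "b func" records only the PC; and "svc #0" is routed to the
   OS-specific syscall record hook with the syscall number read from
   register 8 (X8). */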
3124
3125 /* Record handler for advanced SIMD load and store instructions. */
3126
3127 static unsigned int
3128 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3129 {
3130 CORE_ADDR address;
3131 uint64_t addr_offset = 0;
3132 uint32_t record_buf[24];
3133 uint64_t record_buf_mem[24];
3134 uint32_t reg_rn, reg_rt;
3135 uint32_t reg_index = 0, mem_index = 0;
3136 uint8_t opcode_bits, size_bits;
3137
3138 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3139 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3140 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3141 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3142 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3143
3144 if (record_debug)
3145 debug_printf ("Process record: Advanced SIMD load/store\n");
3146
3147 /* Load/store single structure. */
3148 if (bit (aarch64_insn_r->aarch64_insn, 24))
3149 {
3150 uint8_t sindex, scale, selem, esize, replicate = 0;
3151 scale = opcode_bits >> 2;
3152 selem = ((opcode_bits & 0x02) |
3153 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3154 switch (scale)
3155 {
3156 case 1:
3157 if (size_bits & 0x01)
3158 return AARCH64_RECORD_UNKNOWN;
3159 break;
3160 case 2:
3161 if ((size_bits >> 1) & 0x01)
3162 return AARCH64_RECORD_UNKNOWN;
3163 if (size_bits & 0x01)
3164 {
3165 if (!((opcode_bits >> 1) & 0x01))
3166 scale = 3;
3167 else
3168 return AARCH64_RECORD_UNKNOWN;
3169 }
3170 break;
3171 case 3:
3172 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3173 {
3174 scale = size_bits;
3175 replicate = 1;
3176 break;
3177 }
3178 else
3179 return AARCH64_RECORD_UNKNOWN;
3180 default:
3181 break;
3182 }
3183 esize = 8 << scale;
3184 if (replicate)
3185 for (sindex = 0; sindex < selem; sindex++)
3186 {
3187 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3188 reg_rt = (reg_rt + 1) % 32;
3189 }
3190 else
3191 {
3192 for (sindex = 0; sindex < selem; sindex++)
3193 if (bit (aarch64_insn_r->aarch64_insn, 22))
3194 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3195 else
3196 {
3197 record_buf_mem[mem_index++] = esize / 8;
3198 record_buf_mem[mem_index++] = address + addr_offset;
3199 }
3200 addr_offset = addr_offset + (esize / 8);
3201 reg_rt = (reg_rt + 1) % 32;
3202 }
3203 }
3204 /* Load/store multiple structure. */
3205 else
3206 {
3207 uint8_t selem, esize, rpt, elements;
3208 uint8_t eindex, rindex;
3209
3210 esize = 8 << size_bits;
3211 if (bit (aarch64_insn_r->aarch64_insn, 30))
3212 elements = 128 / esize;
3213 else
3214 elements = 64 / esize;
3215
3216 switch (opcode_bits)
3217 {
3218 /* LD/ST4 (4 Registers). */
3219 case 0:
3220 rpt = 1;
3221 selem = 4;
3222 break;
3223 /* LD/ST1 (4 Registers). */
3224 case 2:
3225 rpt = 4;
3226 selem = 1;
3227 break;
3228 /* LD/ST3 (3 Registers). */
3229 case 4:
3230 rpt = 1;
3231 selem = 3;
3232 break;
3233 /* LD/ST1 (3 Registers). */
3234 case 6:
3235 rpt = 3;
3236 selem = 1;
3237 break;
3238 /* LD/ST1 (1 Register). */
3239 case 7:
3240 rpt = 1;
3241 selem = 1;
3242 break;
3243 /* LD/ST2 (2 Registers). */
3244 case 8:
3245 rpt = 1;
3246 selem = 2;
3247 break;
3248 /* LD/ST1 (2 Registers). */
3249 case 10:
3250 rpt = 2;
3251 selem = 1;
3252 break;
3253 default:
3254 return AARCH64_RECORD_UNSUPPORTED;
3255 break;
3256 }
3257 for (rindex = 0; rindex < rpt; rindex++)
3258 for (eindex = 0; eindex < elements; eindex++)
3259 {
3260 uint8_t reg_tt, sindex;
3261 reg_tt = (reg_rt + rindex) % 32;
3262 for (sindex = 0; sindex < selem; sindex++)
3263 {
3264 if (bit (aarch64_insn_r->aarch64_insn, 22))
3265 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3266 else
3267 {
3268 record_buf_mem[mem_index++] = esize / 8;
3269 record_buf_mem[mem_index++] = address + addr_offset;
3270 }
3271 addr_offset = addr_offset + (esize / 8);
3272 reg_tt = (reg_tt + 1) % 32;
3273 }
3274 }
3275 }
3276
3277 if (bit (aarch64_insn_r->aarch64_insn, 23))
3278 record_buf[reg_index++] = reg_rn;
3279
3280 aarch64_insn_r->reg_rec_count = reg_index;
3281 aarch64_insn_r->mem_rec_count = mem_index / 2;
3282 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3283 record_buf_mem);
3284 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3285 record_buf);
3286 return AARCH64_RECORD_SUCCESS;
3287 }
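/* Example (editorial, assumed encoding): "st1 {v0.2d}, [x0]" stores
   one register of two 64-bit elements, so the handler emits two
   memory records of eight bytes each, at X0 and X0 + 8; the
   post-indexed form "st1 {v0.2d}, [x0], #16" additionally records
   the base register X0, because bit 23 flags the write-back. */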
3288
3289 /* Record handler for load and store instructions. */
3290
3291 static unsigned int
3292 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3293 {
3294 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3295 uint8_t insn_bit23, insn_bit21;
3296 uint8_t opc, size_bits, ld_flag, vector_flag;
3297 uint32_t reg_rn, reg_rt, reg_rt2;
3298 uint64_t datasize, offset;
3299 uint32_t record_buf[8];
3300 uint64_t record_buf_mem[8];
3301 CORE_ADDR address;
3302
3303 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3304 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3305 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3306 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3307 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3308 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3309 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3310 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3311 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3312 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3313 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3314
3315 /* Load/store exclusive. */
3316 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3317 {
3318 if (record_debug)
3319 debug_printf ("Process record: load/store exclusive\n");
3320
3321 if (ld_flag)
3322 {
3323 record_buf[0] = reg_rt;
3324 aarch64_insn_r->reg_rec_count = 1;
3325 if (insn_bit21)
3326 {
3327 record_buf[1] = reg_rt2;
3328 aarch64_insn_r->reg_rec_count = 2;
3329 }
3330 }
3331 else
3332 {
3333 if (insn_bit21)
3334 datasize = (8 << size_bits) * 2;
3335 else
3336 datasize = (8 << size_bits);
3337 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3338 &address);
3339 record_buf_mem[0] = datasize / 8;
3340 record_buf_mem[1] = address;
3341 aarch64_insn_r->mem_rec_count = 1;
3342 if (!insn_bit23)
3343 {
3344 /* Save register rs. */
3345 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3346 aarch64_insn_r->reg_rec_count = 1;
3347 }
3348 }
3349 }
3350 /* Decode load register (literal) instructions. */
3351 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3352 {
3353 if (record_debug)
3354 debug_printf ("Process record: load register (literal)\n");
3355 if (vector_flag)
3356 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3357 else
3358 record_buf[0] = reg_rt;
3359 aarch64_insn_r->reg_rec_count = 1;
3360 }
3361 /* Decode all types of load/store pair instructions. */
3362 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3363 {
3364 if (record_debug)
3365 debug_printf ("Process record: load/store pair\n");
3366
3367 if (ld_flag)
3368 {
3369 if (vector_flag)
3370 {
3371 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3372 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3373 }
3374 else
3375 {
3376 record_buf[0] = reg_rt;
3377 record_buf[1] = reg_rt2;
3378 }
3379 aarch64_insn_r->reg_rec_count = 2;
3380 }
3381 else
3382 {
3383 uint16_t imm7_off;
3384 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3385 if (!vector_flag)
3386 size_bits = size_bits >> 1;
3387 datasize = 8 << (2 + size_bits);
3388 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3389 offset = offset << (2 + size_bits);
3390 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3391 &address);
3392 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3393 {
3394 if (imm7_off & 0x40)
3395 address = address - offset;
3396 else
3397 address = address + offset;
3398 }
3399
3400 record_buf_mem[0] = datasize / 8;
3401 record_buf_mem[1] = address;
3402 record_buf_mem[2] = datasize / 8;
3403 record_buf_mem[3] = address + (datasize / 8);
3404 aarch64_insn_r->mem_rec_count = 2;
3405 }
3406 if (bit (aarch64_insn_r->aarch64_insn, 23))
3407 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3408 }
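/* Worked example (editorial): "stp x1, x2, [sp, #-16]!" encodes a
   negative imm7, so the two's-complement fix-up above yields an
   OFFSET of 16 and a pre-indexed address of SP - 16; two 8-byte
   memory records cover the pair, and the write-back bit (23) adds SP
   itself to the register records. */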
3409 /* Load/store register (unsigned immediate) instructions. */
3410 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3411 {
3412 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3413 if (!(opc >> 1))
3414 if (opc & 0x01)
3415 ld_flag = 0x01;
3416 else
3417 ld_flag = 0x0;
3418 else
3419 if (size_bits != 0x03)
3420 ld_flag = 0x01;
3421 else
3422 return AARCH64_RECORD_UNKNOWN;
3423
3424 if (record_debug)
3425 {
3426 debug_printf ("Process record: load/store (unsigned immediate):"
3427 " size %x V %d opc %x\n", size_bits, vector_flag,
3428 opc);
3429 }
3430
3431 if (!ld_flag)
3432 {
3433 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3434 datasize = 8 << size_bits;
3435 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3436 &address);
3437 offset = offset << size_bits;
3438 address = address + offset;
3439
3440 record_buf_mem[0] = datasize >> 3;
3441 record_buf_mem[1] = address;
3442 aarch64_insn_r->mem_rec_count = 1;
3443 }
3444 else
3445 {
3446 if (vector_flag)
3447 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3448 else
3449 record_buf[0] = reg_rt;
3450 aarch64_insn_r->reg_rec_count = 1;
3451 }
3452 }
3453 /* Load/store register (register offset) instructions. */
3454 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3455 && insn_bits10_11 == 0x02 && insn_bit21)
3456 {
3457 if (record_debug)
3458 debug_printf ("Process record: load/store (register offset)\n");
3459 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3460 if (!(opc >> 1))
3461 if (opc & 0x01)
3462 ld_flag = 0x01;
3463 else
3464 ld_flag = 0x0;
3465 else
3466 if (size_bits != 0x03)
3467 ld_flag = 0x01;
3468 else
3469 return AARCH64_RECORD_UNKNOWN;
3470
3471 if (!ld_flag)
3472 {
3473 uint64_t reg_rm_val;
3474 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3475 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3476 if (bit (aarch64_insn_r->aarch64_insn, 12))
3477 offset = reg_rm_val << size_bits;
3478 else
3479 offset = reg_rm_val;
3480 datasize = 8 << size_bits;
3481 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3482 &address);
3483 address = address + offset;
3484 record_buf_mem[0] = datasize >> 3;
3485 record_buf_mem[1] = address;
3486 aarch64_insn_r->mem_rec_count = 1;
3487 }
3488 else
3489 {
3490 if (vector_flag)
3491 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3492 else
3493 record_buf[0] = reg_rt;
3494 aarch64_insn_r->reg_rec_count = 1;
3495 }
3496 }
3497 /* Load/store register (immediate and unprivileged) instructions. */
3498 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3499 && !insn_bit21)
3500 {
3501 if (record_debug)
3502 {
3503 debug_printf ("Process record: load/store "
3504 "(immediate and unprivileged)\n");
3505 }
3506 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3507 if (!(opc >> 1))
3508 if (opc & 0x01)
3509 ld_flag = 0x01;
3510 else
3511 ld_flag = 0x0;
3512 else
3513 if (size_bits != 0x03)
3514 ld_flag = 0x01;
3515 else
3516 return AARCH64_RECORD_UNKNOWN;
3517
3518 if (!ld_flag)
3519 {
3520 uint16_t imm9_off;
3521 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3522 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3523 datasize = 8 << size_bits;
3524 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3525 &address);
3526 if (insn_bits10_11 != 0x01)
3527 {
3528 if (imm9_off & 0x0100)
3529 address = address - offset;
3530 else
3531 address = address + offset;
3532 }
3533 record_buf_mem[0] = datasize >> 3;
3534 record_buf_mem[1] = address;
3535 aarch64_insn_r->mem_rec_count = 1;
3536 }
3537 else
3538 {
3539 if (vector_flag)
3540 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3541 else
3542 record_buf[0] = reg_rt;
3543 aarch64_insn_r->reg_rec_count = 1;
3544 }
3545 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3546 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3547 }
3548 /* Advanced SIMD load/store instructions. */
3549 else
3550 return aarch64_record_asimd_load_store (aarch64_insn_r);
3551
3552 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3553 record_buf_mem);
3554 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3555 record_buf);
3556 return AARCH64_RECORD_SUCCESS;
3557 }
3558
3559 /* Record handler for data processing SIMD and floating point instructions. */
3560
3561 static unsigned int
3562 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3563 {
3564 uint8_t insn_bit21, opcode, rmode, reg_rd;
3565 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3566 uint8_t insn_bits11_14;
3567 uint32_t record_buf[2];
3568
3569 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3570 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3571 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3572 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3573 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3574 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3575 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3576 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3577 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3578
3579 if (record_debug)
3580 debug_printf ("Process record: data processing SIMD/FP: ");
3581
3582 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3583 {
3584 /* Floating point - fixed point conversion instructions. */
3585 if (!insn_bit21)
3586 {
3587 if (record_debug)
3588 debug_printf ("FP - fixed point conversion");
3589
3590 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3591 record_buf[0] = reg_rd;
3592 else
3593 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3594 }
3595 /* Floating point - conditional compare instructions. */
3596 else if (insn_bits10_11 == 0x01)
3597 {
3598 if (record_debug)
3599 debug_printf ("FP - conditional compare");
3600
3601 record_buf[0] = AARCH64_CPSR_REGNUM;
3602 }
3603 /* Floating point - data processing (2-source) and
3604 conditional select instructions. */
3605 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3606 {
3607 if (record_debug)
3608 debug_printf ("FP - DP (2-source)");
3609
3610 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3611 }
3612 else if (insn_bits10_11 == 0x00)
3613 {
3614 /* Floating point - immediate instructions. */
3615 if ((insn_bits12_15 & 0x01) == 0x01
3616 || (insn_bits12_15 & 0x07) == 0x04)
3617 {
3618 if (record_debug)
3619 debug_printf ("FP - immediate");
3620 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3621 }
3622 /* Floating point - compare instructions. */
3623 else if ((insn_bits12_15 & 0x03) == 0x02)
3624 {
3625 if (record_debug)
3626 debug_printf ("FP - immediate");
3627 record_buf[0] = AARCH64_CPSR_REGNUM;
3628 }
3629 /* Floating point - integer conversions instructions. */
3630 else if (insn_bits12_15 == 0x00)
3631 {
3632 /* Convert float to integer instruction. */
3633 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3634 {
3635 if (record_debug)
3636 debug_printf ("float to int conversion");
3637
3638 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3639 }
3640 /* Convert integer to float instruction. */
3641 else if ((opcode >> 1) == 0x01 && !rmode)
3642 {
3643 if (record_debug)
3644 debug_printf ("int to float conversion");
3645
3646 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3647 }
3648 /* Move float to integer instruction. */
3649 else if ((opcode >> 1) == 0x03)
3650 {
3651 if (record_debug)
3652 debug_printf ("move float to int");
3653
3654 if (!(opcode & 0x01))
3655 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3656 else
3657 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3658 }
3659 else
3660 return AARCH64_RECORD_UNKNOWN;
3661 }
3662 else
3663 return AARCH64_RECORD_UNKNOWN;
3664 }
3665 else
3666 return AARCH64_RECORD_UNKNOWN;
3667 }
3668 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3669 {
3670 if (record_debug)
3671 debug_printf ("SIMD copy");
3672
3673 /* Advanced SIMD copy instructions. */
3674 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3675 && !bit (aarch64_insn_r->aarch64_insn, 15)
3676 && bit (aarch64_insn_r->aarch64_insn, 10))
3677 {
3678 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3679 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3680 else
3681 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3682 }
3683 else
3684 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3685 }
3686 /* All remaining floating point or advanced SIMD instructions. */
3687 else
3688 {
3689 if (record_debug)
3690 debug_printf ("all remain");
3691
3692 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3693 }
3694
3695 if (record_debug)
3696 debug_printf ("\n");
3697
3698 aarch64_insn_r->reg_rec_count++;
3699 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3700 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3701 record_buf);
3702 return AARCH64_RECORD_SUCCESS;
3703 }
3704
3705 /* Decode the instruction type and invoke the matching record handler. */
3706
3707 static unsigned int
3708 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3709 {
3710 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3711
3712 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3713 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3714 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3715 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3716
3717 /* Data processing - immediate instructions. */
3718 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3719 return aarch64_record_data_proc_imm (aarch64_insn_r);
3720
3721 /* Branch, exception generation and system instructions. */
3722 if (ins_bit26 && !ins_bit27 && ins_bit28)
3723 return aarch64_record_branch_except_sys (aarch64_insn_r);
3724
3725 /* Load and store instructions. */
3726 if (!ins_bit25 && ins_bit27)
3727 return aarch64_record_load_store (aarch64_insn_r);
3728
3729 /* Data processing - register instructions. */
3730 if (ins_bit25 && !ins_bit26 && ins_bit27)
3731 return aarch64_record_data_proc_reg (aarch64_insn_r);
3732
3733 /* Data processing - SIMD and floating point instructions. */
3734 if (ins_bit25 && ins_bit26 && ins_bit27)
3735 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3736
3737 return AARCH64_RECORD_UNSUPPORTED;
3738 }
3739
3740 /* Free the allocated register and memory records. */
3741
3742 static void
3743 deallocate_reg_mem (insn_decode_record *record)
3744 {
3745 xfree (record->aarch64_regs);
3746 xfree (record->aarch64_mems);
3747 }
3748
3749 /* Parse the current instruction and record the values of the registers
3750 and memory that it will change onto record_arch_list. Return -1 if
3751 something is wrong. */
3752
3753 int
3754 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3755 CORE_ADDR insn_addr)
3756 {
3757 uint32_t rec_no = 0;
3758 uint8_t insn_size = 4;
3759 uint32_t ret = 0;
3760 ULONGEST t_bit = 0, insn_id = 0;
3761 gdb_byte buf[insn_size];
3762 insn_decode_record aarch64_record;
3763
3764 memset (&buf[0], 0, insn_size);
3765 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3766 target_read_memory (insn_addr, &buf[0], insn_size);
3767 aarch64_record.aarch64_insn
3768 = (uint32_t) extract_unsigned_integer (&buf[0],
3769 insn_size,
3770 gdbarch_byte_order (gdbarch));
3771 aarch64_record.regcache = regcache;
3772 aarch64_record.this_addr = insn_addr;
3773 aarch64_record.gdbarch = gdbarch;
3774
3775 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3776 if (ret == AARCH64_RECORD_UNSUPPORTED)
3777 {
3778 printf_unfiltered (_("Process record does not support instruction "
3779 "0x%0x at address %s.\n"),
3780 aarch64_record.aarch64_insn,
3781 paddress (gdbarch, insn_addr));
3782 ret = -1;
3783 }
3784
3785 if (0 == ret)
3786 {
3787 /* Record registers. */
3788 record_full_arch_list_add_reg (aarch64_record.regcache,
3789 AARCH64_PC_REGNUM);
3790 /* Always record register CPSR. */
3791 record_full_arch_list_add_reg (aarch64_record.regcache,
3792 AARCH64_CPSR_REGNUM);
3793 if (aarch64_record.aarch64_regs)
3794 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3795 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3796 aarch64_record.aarch64_regs[rec_no]))
3797 ret = -1;
3798
3799 /* Record memories. */
3800 if (aarch64_record.aarch64_mems)
3801 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3802 if (record_full_arch_list_add_mem
3803 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3804 aarch64_record.aarch64_mems[rec_no].len))
3805 ret = -1;
3806
3807 if (record_full_arch_list_add_end ())
3808 ret = -1;
3809 }
3810
3811 deallocate_reg_mem (&aarch64_record);
3812 return ret;
3813 }
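/* Typical call (editorial sketch): the record target invokes

     aarch64_process_record (gdbarch, regcache, stop_pc);

   before the instruction at STOP_PC executes, so the pre-execution
   values of every register and memory range the decoder lists are
   saved for later replay. */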