gdb/aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57
58 #include "features/aarch64.c"
59
60 #include "arch/aarch64-insn.h"
61
62 #include "opcode/aarch64.h"
63 #include <algorithm>
64
65 #define submask(x) ((1L << ((x) + 1)) - 1)
66 #define bit(obj,st) (((obj) >> (st)) & 1)
67 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
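/* Illustrative example (not part of the original source): decoding
"add x29, sp, #0x10", whose 32-bit encoding is 0x910043fd, with the
helpers above. */
#if 0
uint32_t insn = 0x910043fd;
int sf = bit (insn, 31); /* 1: a 64-bit operation. */
unsigned imm12 = bits (insn, 10, 21); /* 0x10: the immediate. */
unsigned rn = bits (insn, 5, 9); /* 31: the stack pointer. */
unsigned rd = bits (insn, 0, 4); /* 29: x29, the frame pointer. */
#endif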
68
69 /* Pseudo register base numbers. */
70 #define AARCH64_Q0_REGNUM 0
71 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
72 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
73 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
74 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
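/* For example (illustrative): the pseudo register "d5" occupies number
AARCH64_D0_REGNUM + 5 == 37 in this pseudo space. The accessors below
first subtract gdbarch_num_regs (gdbarch), so these bases compare
directly against the adjusted pseudo register number. */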
75
76 /* The standard register names, and all the valid aliases for them. */
77 static const struct
78 {
79 const char *const name;
80 int regnum;
81 } aarch64_register_aliases[] =
82 {
83 /* 64-bit register names. */
84 {"fp", AARCH64_FP_REGNUM},
85 {"lr", AARCH64_LR_REGNUM},
86 {"sp", AARCH64_SP_REGNUM},
87
88 /* 32-bit register names. */
89 {"w0", AARCH64_X0_REGNUM + 0},
90 {"w1", AARCH64_X0_REGNUM + 1},
91 {"w2", AARCH64_X0_REGNUM + 2},
92 {"w3", AARCH64_X0_REGNUM + 3},
93 {"w4", AARCH64_X0_REGNUM + 4},
94 {"w5", AARCH64_X0_REGNUM + 5},
95 {"w6", AARCH64_X0_REGNUM + 6},
96 {"w7", AARCH64_X0_REGNUM + 7},
97 {"w8", AARCH64_X0_REGNUM + 8},
98 {"w9", AARCH64_X0_REGNUM + 9},
99 {"w10", AARCH64_X0_REGNUM + 10},
100 {"w11", AARCH64_X0_REGNUM + 11},
101 {"w12", AARCH64_X0_REGNUM + 12},
102 {"w13", AARCH64_X0_REGNUM + 13},
103 {"w14", AARCH64_X0_REGNUM + 14},
104 {"w15", AARCH64_X0_REGNUM + 15},
105 {"w16", AARCH64_X0_REGNUM + 16},
106 {"w17", AARCH64_X0_REGNUM + 17},
107 {"w18", AARCH64_X0_REGNUM + 18},
108 {"w19", AARCH64_X0_REGNUM + 19},
109 {"w20", AARCH64_X0_REGNUM + 20},
110 {"w21", AARCH64_X0_REGNUM + 21},
111 {"w22", AARCH64_X0_REGNUM + 22},
112 {"w23", AARCH64_X0_REGNUM + 23},
113 {"w24", AARCH64_X0_REGNUM + 24},
114 {"w25", AARCH64_X0_REGNUM + 25},
115 {"w26", AARCH64_X0_REGNUM + 26},
116 {"w27", AARCH64_X0_REGNUM + 27},
117 {"w28", AARCH64_X0_REGNUM + 28},
118 {"w29", AARCH64_X0_REGNUM + 29},
119 {"w30", AARCH64_X0_REGNUM + 30},
120
121 /* specials */
122 {"ip0", AARCH64_X0_REGNUM + 16},
123 {"ip1", AARCH64_X0_REGNUM + 17}
124 };
125
126 /* The required core 'R' registers. */
127 static const char *const aarch64_r_register_names[] =
128 {
129 /* These registers must appear in consecutive RAW register number
130 order and they must begin with AARCH64_X0_REGNUM! */
131 "x0", "x1", "x2", "x3",
132 "x4", "x5", "x6", "x7",
133 "x8", "x9", "x10", "x11",
134 "x12", "x13", "x14", "x15",
135 "x16", "x17", "x18", "x19",
136 "x20", "x21", "x22", "x23",
137 "x24", "x25", "x26", "x27",
138 "x28", "x29", "x30", "sp",
139 "pc", "cpsr"
140 };
141
142 /* The FP/SIMD 'V' registers. */
143 static const char *const aarch64_v_register_names[] =
144 {
145 /* These registers must appear in consecutive RAW register number
146 order and they must begin with AARCH64_V0_REGNUM! */
147 "v0", "v1", "v2", "v3",
148 "v4", "v5", "v6", "v7",
149 "v8", "v9", "v10", "v11",
150 "v12", "v13", "v14", "v15",
151 "v16", "v17", "v18", "v19",
152 "v20", "v21", "v22", "v23",
153 "v24", "v25", "v26", "v27",
154 "v28", "v29", "v30", "v31",
155 "fpsr",
156 "fpcr"
157 };
158
159 /* AArch64 prologue cache structure. */
160 struct aarch64_prologue_cache
161 {
162 /* The program counter at the start of the function. It is used to
163 identify this frame as a prologue frame. */
164 CORE_ADDR func;
165
166 /* The program counter at the time this frame was created; i.e. where
167 this function was called from. It is used to identify this frame as a
168 stub frame. */
169 CORE_ADDR prev_pc;
170
171 /* The stack pointer at the time this frame was created; i.e. the
172 caller's stack pointer when this function was called. It is used
173 to identify this frame. */
174 CORE_ADDR prev_sp;
175
176 /* Is the target available to read from? */
177 int available_p;
178
179 /* The frame base for this frame is just prev_sp - frame size.
180 FRAMESIZE is the distance from the frame pointer to the
181 initial stack pointer. */
182 int framesize;
183
184 /* The register used to hold the frame pointer for this frame. */
185 int framereg;
186
187 /* Saved register offsets. */
188 struct trad_frame_saved_reg *saved_regs;
189 };
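/* Worked example (illustrative, not from the original source): for the
common prologue

stp x29, x30, [sp, #-32]!
mov x29, sp

the analysis below produces framereg == AARCH64_FP_REGNUM and
framesize == 32, and saved_regs records x29 at prev_sp - 32 and
x30 at prev_sp - 24. */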
190
191 static void
192 show_aarch64_debug (struct ui_file *file, int from_tty,
193 struct cmd_list_element *c, const char *value)
194 {
195 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
196 }
197
198 /* Analyze a prologue, looking for a recognizable stack frame
199 and frame pointer. Scan until we encounter a store that could
200 clobber the stack frame unexpectedly, or an unknown instruction. */
201
202 static CORE_ADDR
203 aarch64_analyze_prologue (struct gdbarch *gdbarch,
204 CORE_ADDR start, CORE_ADDR limit,
205 struct aarch64_prologue_cache *cache)
206 {
207 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
208 int i;
209 pv_t regs[AARCH64_X_REGISTER_COUNT];
210 struct pv_area *stack;
211 struct cleanup *back_to;
212
213 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
214 regs[i] = pv_register (i, 0);
215 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
216 back_to = make_cleanup_free_pv_area (stack);
217
218 for (; start < limit; start += 4)
219 {
220 uint32_t insn;
221 aarch64_inst inst;
222
223 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
224
225 if (aarch64_decode_insn (insn, &inst, 1) != 0)
226 break;
227
228 if (inst.opcode->iclass == addsub_imm
229 && (inst.opcode->op == OP_ADD
230 || strcmp ("sub", inst.opcode->name) == 0))
231 {
232 unsigned rd = inst.operands[0].reg.regno;
233 unsigned rn = inst.operands[1].reg.regno;
234
235 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
236 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
237 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
238 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
239
240 if (inst.opcode->op == OP_ADD)
241 {
242 regs[rd] = pv_add_constant (regs[rn],
243 inst.operands[2].imm.value);
244 }
245 else
246 {
247 regs[rd] = pv_add_constant (regs[rn],
248 -inst.operands[2].imm.value);
249 }
250 }
251 else if (inst.opcode->iclass == pcreladdr
252 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
253 {
254 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
255 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
256
257 regs[inst.operands[0].reg.regno] = pv_unknown ();
258 }
259 else if (inst.opcode->iclass == branch_imm)
260 {
261 /* Stop analysis on branch. */
262 break;
263 }
264 else if (inst.opcode->iclass == condbranch)
265 {
266 /* Stop analysis on branch. */
267 break;
268 }
269 else if (inst.opcode->iclass == branch_reg)
270 {
271 /* Stop analysis on branch. */
272 break;
273 }
274 else if (inst.opcode->iclass == compbranch)
275 {
276 /* Stop analysis on branch. */
277 break;
278 }
279 else if (inst.opcode->op == OP_MOVZ)
280 {
281 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
282 regs[inst.operands[0].reg.regno] = pv_unknown ();
283 }
284 else if (inst.opcode->iclass == log_shift
285 && strcmp (inst.opcode->name, "orr") == 0)
286 {
287 unsigned rd = inst.operands[0].reg.regno;
288 unsigned rn = inst.operands[1].reg.regno;
289 unsigned rm = inst.operands[2].reg.regno;
290
291 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
292 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
293 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
294
295 if (inst.operands[2].shifter.amount == 0
296 && rn == AARCH64_SP_REGNUM)
297 regs[rd] = regs[rm];
298 else
299 {
300 if (aarch64_debug)
301 {
302 debug_printf ("aarch64: prologue analysis gave up "
303 "addr=%s opcode=0x%x (orr x register)\n",
304 core_addr_to_string_nz (start), insn);
305 }
306 break;
307 }
308 }
309 else if (inst.opcode->op == OP_STUR)
310 {
311 unsigned rt = inst.operands[0].reg.regno;
312 unsigned rn = inst.operands[1].addr.base_regno;
313 int is64
314 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
315
316 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
317 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
318 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
319 gdb_assert (!inst.operands[1].addr.offset.is_reg);
320
321 pv_area_store (stack, pv_add_constant (regs[rn],
322 inst.operands[1].addr.offset.imm),
323 is64 ? 8 : 4, regs[rt]);
324 }
325 else if ((inst.opcode->iclass == ldstpair_off
326 || (inst.opcode->iclass == ldstpair_indexed
327 && inst.operands[2].addr.preind))
328 && strcmp ("stp", inst.opcode->name) == 0)
329 {
330 /* STP with addressing mode Pre-indexed and Base register. */
331 unsigned rt1 = inst.operands[0].reg.regno;
332 unsigned rt2 = inst.operands[1].reg.regno;
333 unsigned rn = inst.operands[2].addr.base_regno;
334 int32_t imm = inst.operands[2].addr.offset.imm;
335
336 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
337 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
338 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
339 gdb_assert (!inst.operands[2].addr.offset.is_reg);
340
341 /* If recording this store would invalidate the store area
342 (perhaps because rn is not known) then we should abandon
343 further prologue analysis. */
344 if (pv_area_store_would_trash (stack,
345 pv_add_constant (regs[rn], imm)))
346 break;
347
348 if (pv_area_store_would_trash (stack,
349 pv_add_constant (regs[rn], imm + 8)))
350 break;
351
352 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
353 regs[rt1]);
354 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
355 regs[rt2]);
356
357 if (inst.operands[2].addr.writeback)
358 regs[rn] = pv_add_constant (regs[rn], imm);
359
360 }
361 else if (inst.opcode->iclass == testbranch)
362 {
363 /* Stop analysis on branch. */
364 break;
365 }
366 else
367 {
368 if (aarch64_debug)
369 {
370 debug_printf ("aarch64: prologue analysis gave up addr=%s"
371 " opcode=0x%x\n",
372 core_addr_to_string_nz (start), insn);
373 }
374 break;
375 }
376 }
377
378 if (cache == NULL)
379 {
380 do_cleanups (back_to);
381 return start;
382 }
383
384 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
385 {
386 /* Frame pointer is fp. Frame size is constant. */
387 cache->framereg = AARCH64_FP_REGNUM;
388 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
389 }
390 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
391 {
392 /* Try the stack pointer. */
393 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
394 cache->framereg = AARCH64_SP_REGNUM;
395 }
396 else
397 {
398 /* We're just out of luck. We don't know where the frame is. */
399 cache->framereg = -1;
400 cache->framesize = 0;
401 }
402
403 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
404 {
405 CORE_ADDR offset;
406
407 if (pv_area_find_reg (stack, gdbarch, i, &offset))
408 cache->saved_regs[i].addr = offset;
409 }
410
411 do_cleanups (back_to);
412 return start;
413 }
414
415 /* Implement the "skip_prologue" gdbarch method. */
416
417 static CORE_ADDR
418 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
419 {
420 CORE_ADDR func_addr, limit_pc;
421
422 /* See if we can determine the end of the prologue via the symbol
423 table. If so, then return either PC, or the PC after the
424 prologue, whichever is greater. */
425 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
426 {
427 CORE_ADDR post_prologue_pc
428 = skip_prologue_using_sal (gdbarch, func_addr);
429
430 if (post_prologue_pc != 0)
431 return std::max (pc, post_prologue_pc);
432 }
433
434 /* Can't determine prologue from the symbol table, need to examine
435 instructions. */
436
437 /* Find an upper limit on the function prologue using the debug
438 information. If the debug information could not be used to
439 provide that bound, then use an arbitrary large number as the
440 upper bound. */
441 limit_pc = skip_prologue_using_sal (gdbarch, pc);
442 if (limit_pc == 0)
443 limit_pc = pc + 128; /* Magic. */
444
445 /* Try disassembling prologue. */
446 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
447 }
448
449 /* Scan the function prologue for THIS_FRAME and populate the prologue
450 cache CACHE. */
451
452 static void
453 aarch64_scan_prologue (struct frame_info *this_frame,
454 struct aarch64_prologue_cache *cache)
455 {
456 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
457 CORE_ADDR prologue_start;
458 CORE_ADDR prologue_end;
459 CORE_ADDR prev_pc = get_frame_pc (this_frame);
460 struct gdbarch *gdbarch = get_frame_arch (this_frame);
461
462 cache->prev_pc = prev_pc;
463
464 /* Assume we do not find a frame. */
465 cache->framereg = -1;
466 cache->framesize = 0;
467
468 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
469 &prologue_end))
470 {
471 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
472
473 if (sal.line == 0)
474 {
475 /* No line info so use the current PC. */
476 prologue_end = prev_pc;
477 }
478 else if (sal.end < prologue_end)
479 {
480 /* The next line begins after the function end. */
481 prologue_end = sal.end;
482 }
483
484 prologue_end = std::min (prologue_end, prev_pc);
485 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
486 }
487 else
488 {
489 CORE_ADDR frame_loc;
490
491 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
492 if (frame_loc == 0)
493 return;
494
495 cache->framereg = AARCH64_FP_REGNUM;
496 cache->framesize = 16;
497 cache->saved_regs[29].addr = 0;
498 cache->saved_regs[30].addr = 8;
499 }
500 }
501
502 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
503 function may throw an exception if the inferior's registers or memory is
504 not available. */
505
506 static void
507 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
508 struct aarch64_prologue_cache *cache)
509 {
510 CORE_ADDR unwound_fp;
511 int reg;
512
513 aarch64_scan_prologue (this_frame, cache);
514
515 if (cache->framereg == -1)
516 return;
517
518 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
519 if (unwound_fp == 0)
520 return;
521
522 cache->prev_sp = unwound_fp + cache->framesize;
523
524 /* Calculate actual addresses of saved registers using offsets
525 determined by aarch64_analyze_prologue. */
526 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
527 if (trad_frame_addr_p (cache->saved_regs, reg))
528 cache->saved_regs[reg].addr += cache->prev_sp;
529
530 cache->func = get_frame_func (this_frame);
531
532 cache->available_p = 1;
533 }
534
535 /* Allocate and fill in *THIS_CACHE with information about the prologue of
536 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
537 Return a pointer to the current aarch64_prologue_cache in
538 *THIS_CACHE. */
539
540 static struct aarch64_prologue_cache *
541 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
542 {
543 struct aarch64_prologue_cache *cache;
544
545 if (*this_cache != NULL)
546 return (struct aarch64_prologue_cache *) *this_cache;
547
548 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
549 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
550 *this_cache = cache;
551
552 TRY
553 {
554 aarch64_make_prologue_cache_1 (this_frame, cache);
555 }
556 CATCH (ex, RETURN_MASK_ERROR)
557 {
558 if (ex.error != NOT_AVAILABLE_ERROR)
559 throw_exception (ex);
560 }
561 END_CATCH
562
563 return cache;
564 }
565
566 /* Implement the "stop_reason" frame_unwind method. */
567
568 static enum unwind_stop_reason
569 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
570 void **this_cache)
571 {
572 struct aarch64_prologue_cache *cache
573 = aarch64_make_prologue_cache (this_frame, this_cache);
574
575 if (!cache->available_p)
576 return UNWIND_UNAVAILABLE;
577
578 /* Halt the backtrace at "_start". */
579 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
580 return UNWIND_OUTERMOST;
581
582 /* We've hit a wall, stop. */
583 if (cache->prev_sp == 0)
584 return UNWIND_OUTERMOST;
585
586 return UNWIND_NO_REASON;
587 }
588
589 /* Our frame ID for a normal frame is the current function's starting
590 PC and the caller's SP when we were called. */
591
592 static void
593 aarch64_prologue_this_id (struct frame_info *this_frame,
594 void **this_cache, struct frame_id *this_id)
595 {
596 struct aarch64_prologue_cache *cache
597 = aarch64_make_prologue_cache (this_frame, this_cache);
598
599 if (!cache->available_p)
600 *this_id = frame_id_build_unavailable_stack (cache->func);
601 else
602 *this_id = frame_id_build (cache->prev_sp, cache->func);
603 }
604
605 /* Implement the "prev_register" frame_unwind method. */
606
607 static struct value *
608 aarch64_prologue_prev_register (struct frame_info *this_frame,
609 void **this_cache, int prev_regnum)
610 {
611 struct aarch64_prologue_cache *cache
612 = aarch64_make_prologue_cache (this_frame, this_cache);
613
614 /* If we are asked to unwind the PC, then we need to return the LR
615 instead. The prologue may save PC, but it will point into this
616 frame's prologue, not the next frame's resume location. */
617 if (prev_regnum == AARCH64_PC_REGNUM)
618 {
619 CORE_ADDR lr;
620
621 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
622 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
623 }
624
625 /* SP is generally not saved to the stack, but this frame is
626 identified by the next frame's stack pointer at the time of the
627 call. The value was already reconstructed into PREV_SP. */
628 /*
629 +----------+ ^
630 | saved lr | |
631 +->| saved fp |--+
632 | | |
633 | | | <- Previous SP
634 | +----------+
635 | | saved lr |
636 +--| saved fp |<- FP
637 | |
638 | |<- SP
639 +----------+ */
640 if (prev_regnum == AARCH64_SP_REGNUM)
641 return frame_unwind_got_constant (this_frame, prev_regnum,
642 cache->prev_sp);
643
644 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
645 prev_regnum);
646 }
647
648 /* AArch64 prologue unwinder. */
649 struct frame_unwind aarch64_prologue_unwind =
650 {
651 NORMAL_FRAME,
652 aarch64_prologue_frame_unwind_stop_reason,
653 aarch64_prologue_this_id,
654 aarch64_prologue_prev_register,
655 NULL,
656 default_frame_sniffer
657 };
658
659 /* Allocate and fill in *THIS_CACHE with information about the prologue of
660 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
661 Return a pointer to the current aarch64_prologue_cache in
662 *THIS_CACHE. */
663
664 static struct aarch64_prologue_cache *
665 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
666 {
667 struct aarch64_prologue_cache *cache;
668
669 if (*this_cache != NULL)
670 return (struct aarch64_prologue_cache *) *this_cache;
671
672 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
673 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
674 *this_cache = cache;
675
676 TRY
677 {
678 cache->prev_sp = get_frame_register_unsigned (this_frame,
679 AARCH64_SP_REGNUM);
680 cache->prev_pc = get_frame_pc (this_frame);
681 cache->available_p = 1;
682 }
683 CATCH (ex, RETURN_MASK_ERROR)
684 {
685 if (ex.error != NOT_AVAILABLE_ERROR)
686 throw_exception (ex);
687 }
688 END_CATCH
689
690 return cache;
691 }
692
693 /* Implement the "stop_reason" frame_unwind method. */
694
695 static enum unwind_stop_reason
696 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
697 void **this_cache)
698 {
699 struct aarch64_prologue_cache *cache
700 = aarch64_make_stub_cache (this_frame, this_cache);
701
702 if (!cache->available_p)
703 return UNWIND_UNAVAILABLE;
704
705 return UNWIND_NO_REASON;
706 }
707
708 /* Our frame ID for a stub frame is the current SP and LR. */
709
710 static void
711 aarch64_stub_this_id (struct frame_info *this_frame,
712 void **this_cache, struct frame_id *this_id)
713 {
714 struct aarch64_prologue_cache *cache
715 = aarch64_make_stub_cache (this_frame, this_cache);
716
717 if (cache->available_p)
718 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
719 else
720 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
721 }
722
723 /* Implement the "sniffer" frame_unwind method. */
724
725 static int
726 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
727 struct frame_info *this_frame,
728 void **this_prologue_cache)
729 {
730 CORE_ADDR addr_in_block;
731 gdb_byte dummy[4];
732
733 addr_in_block = get_frame_address_in_block (this_frame);
734 if (in_plt_section (addr_in_block)
735 /* We also use the stub unwinder if the target memory is unreadable
736 to avoid having the prologue unwinder trying to read it. */
737 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
738 return 1;
739
740 return 0;
741 }
742
743 /* AArch64 stub unwinder. */
744 struct frame_unwind aarch64_stub_unwind =
745 {
746 NORMAL_FRAME,
747 aarch64_stub_frame_unwind_stop_reason,
748 aarch64_stub_this_id,
749 aarch64_prologue_prev_register,
750 NULL,
751 aarch64_stub_unwind_sniffer
752 };
753
754 /* Return the frame base address of *THIS_FRAME. */
755
756 static CORE_ADDR
757 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
758 {
759 struct aarch64_prologue_cache *cache
760 = aarch64_make_prologue_cache (this_frame, this_cache);
761
762 return cache->prev_sp - cache->framesize;
763 }
764
765 /* AArch64 default frame base information. */
766 struct frame_base aarch64_normal_base =
767 {
768 &aarch64_prologue_unwind,
769 aarch64_normal_frame_base,
770 aarch64_normal_frame_base,
771 aarch64_normal_frame_base
772 };
773
774 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
775 dummy frame. The frame ID's base needs to match the TOS value
776 saved by save_dummy_frame_tos () and returned from
777 aarch64_push_dummy_call, and the PC needs to match the dummy
778 frame's breakpoint. */
779
780 static struct frame_id
781 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
782 {
783 return frame_id_build (get_frame_register_unsigned (this_frame,
784 AARCH64_SP_REGNUM),
785 get_frame_pc (this_frame));
786 }
787
788 /* Implement the "unwind_pc" gdbarch method. */
789
790 static CORE_ADDR
791 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
792 {
793 CORE_ADDR pc
794 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
795
796 return pc;
797 }
798
799 /* Implement the "unwind_sp" gdbarch method. */
800
801 static CORE_ADDR
802 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
803 {
804 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
805 }
806
807 /* Return the value of the REGNUM register in the previous frame of
808 *THIS_FRAME. */
809
810 static struct value *
811 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
812 void **this_cache, int regnum)
813 {
814 CORE_ADDR lr;
815
816 switch (regnum)
817 {
818 case AARCH64_PC_REGNUM:
819 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
820 return frame_unwind_got_constant (this_frame, regnum, lr);
821
822 default:
823 internal_error (__FILE__, __LINE__,
824 _("Unexpected register %d"), regnum);
825 }
826 }
827
828 /* Implement the "init_reg" dwarf2_frame_ops method. */
829
830 static void
831 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
832 struct dwarf2_frame_state_reg *reg,
833 struct frame_info *this_frame)
834 {
835 switch (regnum)
836 {
837 case AARCH64_PC_REGNUM:
838 reg->how = DWARF2_FRAME_REG_FN;
839 reg->loc.fn = aarch64_dwarf2_prev_register;
840 break;
841 case AARCH64_SP_REGNUM:
842 reg->how = DWARF2_FRAME_REG_CFA;
843 break;
844 }
845 }
846
847 /* When arguments must be pushed onto the stack, they go on in reverse
848 order. The code below implements a FILO (stack) to do this. */
849
850 typedef struct
851 {
852 /* Value to pass on stack. It can be NULL if this item is for stack
853 padding. */
854 const gdb_byte *data;
855
856 /* Size in bytes of value to pass on stack. */
857 int len;
858 } stack_item_t;
859
860 DEF_VEC_O (stack_item_t);
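/* Illustration: aarch64_push_dummy_call pushes items in argument order
and pops them in reverse, decrementing SP before each write, so the
first stacked argument ends up at the lowest address -- the final SP,
matching its NSAA offset of 0. */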
861
862 /* Return the alignment (in bytes) of the given type. */
863
864 static int
865 aarch64_type_align (struct type *t)
866 {
867 int n;
868 int align;
869 int falign;
870
871 t = check_typedef (t);
872 switch (TYPE_CODE (t))
873 {
874 default:
875 /* Should never happen. */
876 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
877 return 4;
878
879 case TYPE_CODE_PTR:
880 case TYPE_CODE_ENUM:
881 case TYPE_CODE_INT:
882 case TYPE_CODE_FLT:
883 case TYPE_CODE_SET:
884 case TYPE_CODE_RANGE:
885 case TYPE_CODE_BITSTRING:
886 case TYPE_CODE_REF:
887 case TYPE_CODE_CHAR:
888 case TYPE_CODE_BOOL:
889 return TYPE_LENGTH (t);
890
891 case TYPE_CODE_ARRAY:
892 if (TYPE_VECTOR (t))
893 {
894 /* Use the natural alignment for vector types (the same as for
895 scalar types), but the maximum alignment is 128 bits. */
896 if (TYPE_LENGTH (t) > 16)
897 return 16;
898 else
899 return TYPE_LENGTH (t);
900 }
901 else
902 return aarch64_type_align (TYPE_TARGET_TYPE (t));
903 case TYPE_CODE_COMPLEX:
904 return aarch64_type_align (TYPE_TARGET_TYPE (t));
905
906 case TYPE_CODE_STRUCT:
907 case TYPE_CODE_UNION:
908 align = 1;
909 for (n = 0; n < TYPE_NFIELDS (t); n++)
910 {
911 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
912 if (falign > align)
913 align = falign;
914 }
915 return align;
916 }
917 }
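/* Examples (illustrative): for "struct s { char c; double d; };" the
largest field alignment wins and this returns 8; a 16-byte vector type
returns 16, and larger vectors are capped at 16 bytes (128 bits). */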
918
919 /* Return 1 if *TY is a homogeneous floating-point aggregate or
920 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
921 document; otherwise return 0. */
922
923 static int
924 is_hfa_or_hva (struct type *ty)
925 {
926 switch (TYPE_CODE (ty))
927 {
928 case TYPE_CODE_ARRAY:
929 {
930 struct type *target_ty = TYPE_TARGET_TYPE (ty);
931
932 if (TYPE_VECTOR (ty))
933 return 0;
934
935 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
936 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
937 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
938 && TYPE_VECTOR (target_ty))))
939 return 1;
940 break;
941 }
942
943 case TYPE_CODE_UNION:
944 case TYPE_CODE_STRUCT:
945 {
946 /* HFA or HVA has at most four members. */
947 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
948 {
949 struct type *member0_type;
950
951 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
952 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
953 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
954 && TYPE_VECTOR (member0_type)))
955 {
956 int i;
957
958 for (i = 0; i < TYPE_NFIELDS (ty); i++)
959 {
960 struct type *member1_type;
961
962 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
963 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
964 || (TYPE_LENGTH (member0_type)
965 != TYPE_LENGTH (member1_type)))
966 return 0;
967 }
968 return 1;
969 }
970 }
971 return 0;
972 }
973
974 default:
975 break;
976 }
977
978 return 0;
979 }
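/* Examples (illustrative): "struct { float x, y, z; }" has at most four
members, all of one floating-point type, so this returns 1 (an HFA);
"struct { float x; double y; }" mixes member sizes and returns 0. */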
980
981 /* AArch64 function call information structure. */
982 struct aarch64_call_info
983 {
984 /* The current argument number. */
985 unsigned argnum;
986
987 /* The next general purpose register number, equivalent to NGRN as
988 described in the AArch64 Procedure Call Standard. */
989 unsigned ngrn;
990
991 /* The next SIMD and floating point register number, equivalent to
992 NSRN as described in the AArch64 Procedure Call Standard. */
993 unsigned nsrn;
994
995 /* The next stacked argument address, equivalent to NSAA as
996 described in the AArch64 Procedure Call Standard. */
997 unsigned nsaa;
998
999 /* Stack item vector. */
1000 VEC(stack_item_t) *si;
1001 };
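/* Illustrative walk-through (not part of the original source): for a
call f (int a, double b, int c), the passing code below leaves
ngrn == 2 (a in x0, c in x1), nsrn == 1 (b in v0) and nsaa == 0,
since nothing spills to the stack. */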
1002
1003 /* Pass a value in a sequence of consecutive X registers. The caller
1004 is responsible for ensuring sufficient registers are available. */
1005
1006 static void
1007 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1008 struct aarch64_call_info *info, struct type *type,
1009 struct value *arg)
1010 {
1011 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1012 int len = TYPE_LENGTH (type);
1013 enum type_code typecode = TYPE_CODE (type);
1014 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1015 const bfd_byte *buf = value_contents (arg);
1016
1017 info->argnum++;
1018
1019 while (len > 0)
1020 {
1021 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1022 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1023 byte_order);
1024
1025
1026 /* Adjust sub-word struct/union args when big-endian. */
1027 if (byte_order == BFD_ENDIAN_BIG
1028 && partial_len < X_REGISTER_SIZE
1029 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1030 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1031
1032 if (aarch64_debug)
1033 {
1034 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1035 gdbarch_register_name (gdbarch, regnum),
1036 phex (regval, X_REGISTER_SIZE));
1037 }
1038 regcache_cooked_write_unsigned (regcache, regnum, regval);
1039 len -= partial_len;
1040 buf += partial_len;
1041 regnum++;
1042 }
1043 }
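/* Example (illustrative): a 3-byte struct passed on a big-endian
target is shifted left by 5 bytes above, so its bytes land in the most
significant end of the X register, matching their layout in memory. */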
1044
1045 /* Attempt to marshall a value in a V register. Return 1 if
1046 successful, or 0 if insufficient registers are available. This
1047 function, unlike the equivalent pass_in_x () function, does not
1048 handle arguments spread across multiple registers. */
1049
1050 static int
1051 pass_in_v (struct gdbarch *gdbarch,
1052 struct regcache *regcache,
1053 struct aarch64_call_info *info,
1054 int len, const bfd_byte *buf)
1055 {
1056 if (info->nsrn < 8)
1057 {
1058 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1059 gdb_byte reg[V_REGISTER_SIZE];
1060
1061 info->argnum++;
1062 info->nsrn++;
1063
1064 memset (reg, 0, sizeof (reg));
1065 /* PCS C.1, the argument is allocated to the least significant
1066 bits of the V register. */
1067 memcpy (reg, buf, len);
1068 regcache_cooked_write (regcache, regnum, reg);
1069
1070 if (aarch64_debug)
1071 {
1072 debug_printf ("arg %d in %s\n", info->argnum,
1073 gdbarch_register_name (gdbarch, regnum));
1074 }
1075 return 1;
1076 }
1077 info->nsrn = 8;
1078 return 0;
1079 }
1080
1081 /* Marshall an argument onto the stack. */
1082
1083 static void
1084 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1085 struct value *arg)
1086 {
1087 const bfd_byte *buf = value_contents (arg);
1088 int len = TYPE_LENGTH (type);
1089 int align;
1090 stack_item_t item;
1091
1092 info->argnum++;
1093
1094 align = aarch64_type_align (type);
1095
1096 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1097 natural alignment of the argument's type. */
1098 align = align_up (align, 8);
1099
1100 /* The AArch64 PCS requires at most 16-byte (quadword) alignment. */
1101 if (align > 16)
1102 align = 16;
1103
1104 if (aarch64_debug)
1105 {
1106 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1107 info->nsaa);
1108 }
1109
1110 item.len = len;
1111 item.data = buf;
1112 VEC_safe_push (stack_item_t, info->si, &item);
1113
1114 info->nsaa += len;
1115 if (info->nsaa & (align - 1))
1116 {
1117 /* Push stack alignment padding. */
1118 int pad = align - (info->nsaa & (align - 1));
1119
1120 item.len = pad;
1121 item.data = NULL;
1122
1123 VEC_safe_push (stack_item_t, info->si, &item);
1124 info->nsaa += pad;
1125 }
1126 }
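/* Example (illustrative): a 12-byte argument whose effective alignment
is 8 pushes a 12-byte data item followed by a 4-byte padding item
(data == NULL), leaving NSAA at 16 and the next slot aligned. */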
1127
1128 /* Marshall an argument into a sequence of one or more consecutive X
1129 registers or, if insufficient X registers are available, onto
1130 the stack. */
1131
1132 static void
1133 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1134 struct aarch64_call_info *info, struct type *type,
1135 struct value *arg)
1136 {
1137 int len = TYPE_LENGTH (type);
1138 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1139
1140 /* PCS C.13 - Pass in registers if we have enough spare */
1141 if (info->ngrn + nregs <= 8)
1142 {
1143 pass_in_x (gdbarch, regcache, info, type, arg);
1144 info->ngrn += nregs;
1145 }
1146 else
1147 {
1148 info->ngrn = 8;
1149 pass_on_stack (info, type, arg);
1150 }
1151 }
1152
1153 /* Pass a value in a V register, or on the stack if insufficient V
1154 registers are available. */
1155
1156 static void
1157 pass_in_v_or_stack (struct gdbarch *gdbarch,
1158 struct regcache *regcache,
1159 struct aarch64_call_info *info,
1160 struct type *type,
1161 struct value *arg)
1162 {
1163 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1164 value_contents (arg)))
1165 pass_on_stack (info, type, arg);
1166 }
1167
1168 /* Implement the "push_dummy_call" gdbarch method. */
1169
1170 static CORE_ADDR
1171 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1172 struct regcache *regcache, CORE_ADDR bp_addr,
1173 int nargs,
1174 struct value **args, CORE_ADDR sp, int struct_return,
1175 CORE_ADDR struct_addr)
1176 {
1177 int argnum;
1178 struct aarch64_call_info info;
1179 struct type *func_type;
1180 struct type *return_type;
1181 int lang_struct_return;
1182
1183 memset (&info, 0, sizeof (info));
1184
1185 /* We need to know what the type of the called function is in order
1186 to determine the number of named/anonymous arguments for the
1187 actual argument placement, and the return type in order to handle
1188 return value correctly.
1189
1190 The generic code above us views the decision of return in memory
1191 or return in registers as a two-stage process. The language
1192 handler is consulted first and may decide to return in memory (e.g.
1193 a class with a copy constructor returned by value); this will cause
1194 the generic code to allocate space AND insert an initial leading
1195 argument.
1196
1197 If the language code does not decide to pass in memory then the
1198 target code is consulted.
1199
1200 If the language code decides to pass in memory we want to move
1201 the pointer inserted as the initial argument from the argument
1202 list and into X8, the conventional AArch64 struct return pointer
1203 register.
1204
1205 This is slightly awkward, ideally the flag "lang_struct_return"
1206 would be passed to the targets implementation of push_dummy_call.
1207 Rather than change the target interface, we call the language code
1208 directly ourselves. */
1209
1210 func_type = check_typedef (value_type (function));
1211
1212 /* Dereference function pointer types. */
1213 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1214 func_type = TYPE_TARGET_TYPE (func_type);
1215
1216 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1217 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1218
1219 /* If language_pass_by_reference () returned true we will have been
1220 given an additional initial argument, a hidden pointer to the
1221 return slot in memory. */
1222 return_type = TYPE_TARGET_TYPE (func_type);
1223 lang_struct_return = language_pass_by_reference (return_type);
1224
1225 /* Set the return address. For the AArch64, the return breakpoint
1226 is always at BP_ADDR. */
1227 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1228
1229 /* If we were given an initial argument for the return slot because
1230 lang_struct_return was true, lose it. */
1231 if (lang_struct_return)
1232 {
1233 args++;
1234 nargs--;
1235 }
1236
1237 /* The struct_return pointer occupies X8. */
1238 if (struct_return || lang_struct_return)
1239 {
1240 if (aarch64_debug)
1241 {
1242 debug_printf ("struct return in %s = 0x%s\n",
1243 gdbarch_register_name (gdbarch,
1244 AARCH64_STRUCT_RETURN_REGNUM),
1245 paddress (gdbarch, struct_addr));
1246 }
1247 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1248 struct_addr);
1249 }
1250
1251 for (argnum = 0; argnum < nargs; argnum++)
1252 {
1253 struct value *arg = args[argnum];
1254 struct type *arg_type;
1255 int len;
1256
1257 arg_type = check_typedef (value_type (arg));
1258 len = TYPE_LENGTH (arg_type);
1259
1260 switch (TYPE_CODE (arg_type))
1261 {
1262 case TYPE_CODE_INT:
1263 case TYPE_CODE_BOOL:
1264 case TYPE_CODE_CHAR:
1265 case TYPE_CODE_RANGE:
1266 case TYPE_CODE_ENUM:
1267 if (len < 4)
1268 {
1269 /* Promote to 32 bit integer. */
1270 if (TYPE_UNSIGNED (arg_type))
1271 arg_type = builtin_type (gdbarch)->builtin_uint32;
1272 else
1273 arg_type = builtin_type (gdbarch)->builtin_int32;
1274 arg = value_cast (arg_type, arg);
1275 }
1276 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1277 break;
1278
1279 case TYPE_CODE_COMPLEX:
1280 if (info.nsrn <= 6)
1281 {
1282 const bfd_byte *buf = value_contents (arg);
1283 struct type *target_type =
1284 check_typedef (TYPE_TARGET_TYPE (arg_type));
1285
1286 pass_in_v (gdbarch, regcache, &info,
1287 TYPE_LENGTH (target_type), buf);
1288 pass_in_v (gdbarch, regcache, &info,
1289 TYPE_LENGTH (target_type),
1290 buf + TYPE_LENGTH (target_type));
1291 }
1292 else
1293 {
1294 info.nsrn = 8;
1295 pass_on_stack (&info, arg_type, arg);
1296 }
1297 break;
1298 case TYPE_CODE_FLT:
1299 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1300 break;
1301
1302 case TYPE_CODE_STRUCT:
1303 case TYPE_CODE_ARRAY:
1304 case TYPE_CODE_UNION:
1305 if (is_hfa_or_hva (arg_type))
1306 {
1307 int elements = TYPE_NFIELDS (arg_type);
1308
1309 /* Homogeneous Aggregates */
1310 if (info.nsrn + elements < 8)
1311 {
1312 int i;
1313
1314 for (i = 0; i < elements; i++)
1315 {
1316 /* We know that we have sufficient registers
1317 available therefore this will never fallback
1318 to the stack. */
1319 struct value *field =
1320 value_primitive_field (arg, 0, i, arg_type);
1321 struct type *field_type =
1322 check_typedef (value_type (field));
1323
1324 pass_in_v_or_stack (gdbarch, regcache, &info,
1325 field_type, field);
1326 }
1327 }
1328 else
1329 {
1330 info.nsrn = 8;
1331 pass_on_stack (&info, arg_type, arg);
1332 }
1333 }
1334 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1335 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1336 {
1337 /* Short vector types are passed in V registers. */
1338 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1339 }
1340 else if (len > 16)
1341 {
1342 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1343 invisible reference. */
1344
1345 /* Allocate aligned storage. */
1346 sp = align_down (sp - len, 16);
1347
1348 /* Write the real data into the stack. */
1349 write_memory (sp, value_contents (arg), len);
1350
1351 /* Construct the indirection. */
1352 arg_type = lookup_pointer_type (arg_type);
1353 arg = value_from_pointer (arg_type, sp);
1354 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1355 }
1356 else
1357 /* PCS C.15 / C.18 multiple values pass. */
1358 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1359 break;
1360
1361 default:
1362 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1363 break;
1364 }
1365 }
1366
1367 /* Make sure stack retains 16 byte alignment. */
1368 if (info.nsaa & 15)
1369 sp -= 16 - (info.nsaa & 15);
1370
1371 while (!VEC_empty (stack_item_t, info.si))
1372 {
1373 stack_item_t *si = VEC_last (stack_item_t, info.si);
1374
1375 sp -= si->len;
1376 if (si->data != NULL)
1377 write_memory (sp, si->data, si->len);
1378 VEC_pop (stack_item_t, info.si);
1379 }
1380
1381 VEC_free (stack_item_t, info.si);
1382
1383 /* Finally, update the SP register. */
1384 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1385
1386 return sp;
1387 }
1388
1389 /* Implement the "frame_align" gdbarch method. */
1390
1391 static CORE_ADDR
1392 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1393 {
1394 /* Align the stack to sixteen bytes. */
1395 return sp & ~(CORE_ADDR) 15;
1396 }
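/* E.g. (illustrative): 0x7ffffffff8e4 is rounded down to
0x7ffffffff8e0, keeping the inferior-call stack 16-byte aligned. */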
1397
1398 /* Return the type for an AdvSIMD Q register. */
1399
1400 static struct type *
1401 aarch64_vnq_type (struct gdbarch *gdbarch)
1402 {
1403 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1404
1405 if (tdep->vnq_type == NULL)
1406 {
1407 struct type *t;
1408 struct type *elem;
1409
1410 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1411 TYPE_CODE_UNION);
1412
1413 elem = builtin_type (gdbarch)->builtin_uint128;
1414 append_composite_type_field (t, "u", elem);
1415
1416 elem = builtin_type (gdbarch)->builtin_int128;
1417 append_composite_type_field (t, "s", elem);
1418
1419 tdep->vnq_type = t;
1420 }
1421
1422 return tdep->vnq_type;
1423 }
1424
1425 /* Return the type for an AdvSIMD D register. */
1426
1427 static struct type *
1428 aarch64_vnd_type (struct gdbarch *gdbarch)
1429 {
1430 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1431
1432 if (tdep->vnd_type == NULL)
1433 {
1434 struct type *t;
1435 struct type *elem;
1436
1437 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1438 TYPE_CODE_UNION);
1439
1440 elem = builtin_type (gdbarch)->builtin_double;
1441 append_composite_type_field (t, "f", elem);
1442
1443 elem = builtin_type (gdbarch)->builtin_uint64;
1444 append_composite_type_field (t, "u", elem);
1445
1446 elem = builtin_type (gdbarch)->builtin_int64;
1447 append_composite_type_field (t, "s", elem);
1448
1449 tdep->vnd_type = t;
1450 }
1451
1452 return tdep->vnd_type;
1453 }
1454
1455 /* Return the type for an AdvSIMD S register. */
1456
1457 static struct type *
1458 aarch64_vns_type (struct gdbarch *gdbarch)
1459 {
1460 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1461
1462 if (tdep->vns_type == NULL)
1463 {
1464 struct type *t;
1465 struct type *elem;
1466
1467 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1468 TYPE_CODE_UNION);
1469
1470 elem = builtin_type (gdbarch)->builtin_float;
1471 append_composite_type_field (t, "f", elem);
1472
1473 elem = builtin_type (gdbarch)->builtin_uint32;
1474 append_composite_type_field (t, "u", elem);
1475
1476 elem = builtin_type (gdbarch)->builtin_int32;
1477 append_composite_type_field (t, "s", elem);
1478
1479 tdep->vns_type = t;
1480 }
1481
1482 return tdep->vns_type;
1483 }
1484
1485 /* Return the type for an AdvSIMD H register. */
1486
1487 static struct type *
1488 aarch64_vnh_type (struct gdbarch *gdbarch)
1489 {
1490 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1491
1492 if (tdep->vnh_type == NULL)
1493 {
1494 struct type *t;
1495 struct type *elem;
1496
1497 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1498 TYPE_CODE_UNION);
1499
1500 elem = builtin_type (gdbarch)->builtin_uint16;
1501 append_composite_type_field (t, "u", elem);
1502
1503 elem = builtin_type (gdbarch)->builtin_int16;
1504 append_composite_type_field (t, "s", elem);
1505
1506 tdep->vnh_type = t;
1507 }
1508
1509 return tdep->vnh_type;
1510 }
1511
1512 /* Return the type for an AdvSIMD B register. */
1513
1514 static struct type *
1515 aarch64_vnb_type (struct gdbarch *gdbarch)
1516 {
1517 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1518
1519 if (tdep->vnb_type == NULL)
1520 {
1521 struct type *t;
1522 struct type *elem;
1523
1524 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1525 TYPE_CODE_UNION);
1526
1527 elem = builtin_type (gdbarch)->builtin_uint8;
1528 append_composite_type_field (t, "u", elem);
1529
1530 elem = builtin_type (gdbarch)->builtin_int8;
1531 append_composite_type_field (t, "s", elem);
1532
1533 tdep->vnb_type = t;
1534 }
1535
1536 return tdep->vnb_type;
1537 }
1538
1539 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1540
1541 static int
1542 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1543 {
1544 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1545 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1546
1547 if (reg == AARCH64_DWARF_SP)
1548 return AARCH64_SP_REGNUM;
1549
1550 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1551 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1552
1553 return -1;
1554 }
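/* For example (assuming the usual AArch64 DWARF numbering, with x0 at
0, sp at 31 and v0 at 64): DWARF register 3 maps to x3, 31 maps to the
raw sp register, and 72 maps to v8. */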
1555 \f
1556
1557 /* Implement the "print_insn" gdbarch method. */
1558
1559 static int
1560 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1561 {
1562 info->symbols = NULL;
1563 return print_insn_aarch64 (memaddr, info);
1564 }
1565
1566 /* AArch64 BRK software debug mode instruction.
1567 Note that AArch64 code is always little-endian.
1568 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1569 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1570
1571 /* Implement the "breakpoint_from_pc" gdbarch method. */
1572
1573 static const gdb_byte *
1574 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1575 int *lenptr)
1576 {
1577 *lenptr = sizeof (aarch64_default_breakpoint);
1578 return aarch64_default_breakpoint;
1579 }
1580
1581 /* Extract from an array REGS containing the (raw) register state a
1582 function return value of type TYPE, and copy that, in virtual
1583 format, into VALBUF. */
1584
1585 static void
1586 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1587 gdb_byte *valbuf)
1588 {
1589 struct gdbarch *gdbarch = get_regcache_arch (regs);
1590 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1591
1592 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1593 {
1594 bfd_byte buf[V_REGISTER_SIZE];
1595 int len = TYPE_LENGTH (type);
1596
1597 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1598 memcpy (valbuf, buf, len);
1599 }
1600 else if (TYPE_CODE (type) == TYPE_CODE_INT
1601 || TYPE_CODE (type) == TYPE_CODE_CHAR
1602 || TYPE_CODE (type) == TYPE_CODE_BOOL
1603 || TYPE_CODE (type) == TYPE_CODE_PTR
1604 || TYPE_CODE (type) == TYPE_CODE_REF
1605 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1606 {
1607 /* If the type is a plain integer, then the access is
1608 straightforward. Otherwise we have to play around a bit
1609 more. */
1610 int len = TYPE_LENGTH (type);
1611 int regno = AARCH64_X0_REGNUM;
1612 ULONGEST tmp;
1613
1614 while (len > 0)
1615 {
1616 /* By using store_unsigned_integer we avoid having to do
1617 anything special for small big-endian values. */
1618 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1619 store_unsigned_integer (valbuf,
1620 (len > X_REGISTER_SIZE
1621 ? X_REGISTER_SIZE : len), byte_order, tmp);
1622 len -= X_REGISTER_SIZE;
1623 valbuf += X_REGISTER_SIZE;
1624 }
1625 }
1626 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1627 {
1628 int regno = AARCH64_V0_REGNUM;
1629 bfd_byte buf[V_REGISTER_SIZE];
1630 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1631 int len = TYPE_LENGTH (target_type);
1632
1633 regcache_cooked_read (regs, regno, buf);
1634 memcpy (valbuf, buf, len);
1635 valbuf += len;
1636 regcache_cooked_read (regs, regno + 1, buf);
1637 memcpy (valbuf, buf, len);
1638 valbuf += len;
1639 }
1640 else if (is_hfa_or_hva (type))
1641 {
1642 int elements = TYPE_NFIELDS (type);
1643 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1644 int len = TYPE_LENGTH (member_type);
1645 int i;
1646
1647 for (i = 0; i < elements; i++)
1648 {
1649 int regno = AARCH64_V0_REGNUM + i;
1650 bfd_byte buf[V_REGISTER_SIZE];
1651
1652 if (aarch64_debug)
1653 {
1654 debug_printf ("read HFA or HVA return value element %d from %s\n",
1655 i + 1,
1656 gdbarch_register_name (gdbarch, regno));
1657 }
1658 regcache_cooked_read (regs, regno, buf);
1659
1660 memcpy (valbuf, buf, len);
1661 valbuf += len;
1662 }
1663 }
1664 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1665 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1666 {
1667 /* Short vector is returned in V register. */
1668 gdb_byte buf[V_REGISTER_SIZE];
1669
1670 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1671 memcpy (valbuf, buf, TYPE_LENGTH (type));
1672 }
1673 else
1674 {
1675 /* For a structure or union the behaviour is as if the value had
1676 been stored to word-aligned memory and then loaded into
1677 registers with 64-bit load instruction(s). */
1678 int len = TYPE_LENGTH (type);
1679 int regno = AARCH64_X0_REGNUM;
1680 bfd_byte buf[X_REGISTER_SIZE];
1681
1682 while (len > 0)
1683 {
1684 regcache_cooked_read (regs, regno++, buf);
1685 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1686 len -= X_REGISTER_SIZE;
1687 valbuf += X_REGISTER_SIZE;
1688 }
1689 }
1690 }
1691
1692
1693 /* Will a function return an aggregate type in memory or in a
1694 register? Return 0 if an aggregate type can be returned in a
1695 register, 1 if it must be returned in memory. */
1696
1697 static int
1698 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1699 {
1700 type = check_typedef (type);
1701
1702 if (is_hfa_or_hva (type))
1703 {
1704 /* v0-v7 are used to return values and one register is allocated
1705 for one member. However, HFA or HVA has at most four members. */
1706 return 0;
1707 }
1708
1709 if (TYPE_LENGTH (type) > 16)
1710 {
1711 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1712 invisible reference. */
1713
1714 return 1;
1715 }
1716
1717 return 0;
1718 }
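/* Examples (illustrative): "struct { float a, b, c, d; }" is an HFA
and is returned in v0-v3, so this returns 0; a 24-byte plain struct
exceeds 16 bytes, so it returns 1 and the caller passes a result
buffer in X8. */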
1719
1720 /* Write into appropriate registers a function return value of type
1721 TYPE, given in virtual format. */
1722
1723 static void
1724 aarch64_store_return_value (struct type *type, struct regcache *regs,
1725 const gdb_byte *valbuf)
1726 {
1727 struct gdbarch *gdbarch = get_regcache_arch (regs);
1728 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1729
1730 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1731 {
1732 bfd_byte buf[V_REGISTER_SIZE];
1733 int len = TYPE_LENGTH (type);
1734
1735 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1736 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1737 }
1738 else if (TYPE_CODE (type) == TYPE_CODE_INT
1739 || TYPE_CODE (type) == TYPE_CODE_CHAR
1740 || TYPE_CODE (type) == TYPE_CODE_BOOL
1741 || TYPE_CODE (type) == TYPE_CODE_PTR
1742 || TYPE_CODE (type) == TYPE_CODE_REF
1743 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1744 {
1745 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1746 {
1747 /* Values of one word or less are zero/sign-extended and
1748 returned in X0. */
1749 bfd_byte tmpbuf[X_REGISTER_SIZE];
1750 LONGEST val = unpack_long (type, valbuf);
1751
1752 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1753 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1754 }
1755 else
1756 {
1757 /* Integral values greater than one word are stored in
1758 consecutive registers starting with X0. This will always
1759 be a multiple of the register size. */
1760 int len = TYPE_LENGTH (type);
1761 int regno = AARCH64_X0_REGNUM;
1762
1763 while (len > 0)
1764 {
1765 regcache_cooked_write (regs, regno++, valbuf);
1766 len -= X_REGISTER_SIZE;
1767 valbuf += X_REGISTER_SIZE;
1768 }
1769 }
1770 }
1771 else if (is_hfa_or_hva (type))
1772 {
1773 int elements = TYPE_NFIELDS (type);
1774 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1775 int len = TYPE_LENGTH (member_type);
1776 int i;
1777
1778 for (i = 0; i < elements; i++)
1779 {
1780 int regno = AARCH64_V0_REGNUM + i;
1781 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1782
1783 if (aarch64_debug)
1784 {
1785 debug_printf ("write HFA or HVA return value element %d to %s\n",
1786 i + 1,
1787 gdbarch_register_name (gdbarch, regno));
1788 }
1789
1790 memcpy (tmpbuf, valbuf, len);
1791 regcache_cooked_write (regs, regno, tmpbuf);
1792 valbuf += len;
1793 }
1794 }
1795 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1796 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1797 {
1798 /* Short vector. */
1799 gdb_byte buf[V_REGISTER_SIZE];
1800
1801 memcpy (buf, valbuf, TYPE_LENGTH (type));
1802 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1803 }
1804 else
1805 {
1806 /* For a structure or union the behaviour is as if the value had
1807 been stored to word-aligned memory and then loaded into
1808 registers with 64-bit load instruction(s). */
1809 int len = TYPE_LENGTH (type);
1810 int regno = AARCH64_X0_REGNUM;
1811 bfd_byte tmpbuf[X_REGISTER_SIZE];
1812
1813 while (len > 0)
1814 {
1815 memcpy (tmpbuf, valbuf,
1816 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1817 regcache_cooked_write (regs, regno++, tmpbuf);
1818 len -= X_REGISTER_SIZE;
1819 valbuf += X_REGISTER_SIZE;
1820 }
1821 }
1822 }
1823
1824 /* Implement the "return_value" gdbarch method. */
1825
1826 static enum return_value_convention
1827 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1828 struct type *valtype, struct regcache *regcache,
1829 gdb_byte *readbuf, const gdb_byte *writebuf)
1830 {
1831
1832 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1833 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1834 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1835 {
1836 if (aarch64_return_in_memory (gdbarch, valtype))
1837 {
1838 if (aarch64_debug)
1839 debug_printf ("return value in memory\n");
1840 return RETURN_VALUE_STRUCT_CONVENTION;
1841 }
1842 }
1843
1844 if (writebuf)
1845 aarch64_store_return_value (valtype, regcache, writebuf);
1846
1847 if (readbuf)
1848 aarch64_extract_return_value (valtype, regcache, readbuf);
1849
1850 if (aarch64_debug)
1851 debug_printf ("return value in registers\n");
1852
1853 return RETURN_VALUE_REGISTER_CONVENTION;
1854 }
1855
1856 /* Implement the "get_longjmp_target" gdbarch method. */
1857
1858 static int
1859 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1860 {
1861 CORE_ADDR jb_addr;
1862 gdb_byte buf[X_REGISTER_SIZE];
1863 struct gdbarch *gdbarch = get_frame_arch (frame);
1864 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1865 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1866
1867 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1868
1869 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1870 X_REGISTER_SIZE))
1871 return 0;
1872
1873 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1874 return 1;
1875 }
1876
1877 /* Implement the "gen_return_address" gdbarch method. */
1878
1879 static void
1880 aarch64_gen_return_address (struct gdbarch *gdbarch,
1881 struct agent_expr *ax, struct axs_value *value,
1882 CORE_ADDR scope)
1883 {
1884 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1885 value->kind = axs_lvalue_register;
1886 value->u.reg = AARCH64_LR_REGNUM;
1887 }
1888 \f
1889
1890 /* Return the pseudo register name corresponding to register regnum. */
1891
1892 static const char *
1893 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1894 {
1895 static const char *const q_name[] =
1896 {
1897 "q0", "q1", "q2", "q3",
1898 "q4", "q5", "q6", "q7",
1899 "q8", "q9", "q10", "q11",
1900 "q12", "q13", "q14", "q15",
1901 "q16", "q17", "q18", "q19",
1902 "q20", "q21", "q22", "q23",
1903 "q24", "q25", "q26", "q27",
1904 "q28", "q29", "q30", "q31",
1905 };
1906
1907 static const char *const d_name[] =
1908 {
1909 "d0", "d1", "d2", "d3",
1910 "d4", "d5", "d6", "d7",
1911 "d8", "d9", "d10", "d11",
1912 "d12", "d13", "d14", "d15",
1913 "d16", "d17", "d18", "d19",
1914 "d20", "d21", "d22", "d23",
1915 "d24", "d25", "d26", "d27",
1916 "d28", "d29", "d30", "d31",
1917 };
1918
1919 static const char *const s_name[] =
1920 {
1921 "s0", "s1", "s2", "s3",
1922 "s4", "s5", "s6", "s7",
1923 "s8", "s9", "s10", "s11",
1924 "s12", "s13", "s14", "s15",
1925 "s16", "s17", "s18", "s19",
1926 "s20", "s21", "s22", "s23",
1927 "s24", "s25", "s26", "s27",
1928 "s28", "s29", "s30", "s31",
1929 };
1930
1931 static const char *const h_name[] =
1932 {
1933 "h0", "h1", "h2", "h3",
1934 "h4", "h5", "h6", "h7",
1935 "h8", "h9", "h10", "h11",
1936 "h12", "h13", "h14", "h15",
1937 "h16", "h17", "h18", "h19",
1938 "h20", "h21", "h22", "h23",
1939 "h24", "h25", "h26", "h27",
1940 "h28", "h29", "h30", "h31",
1941 };
1942
1943 static const char *const b_name[] =
1944 {
1945 "b0", "b1", "b2", "b3",
1946 "b4", "b5", "b6", "b7",
1947 "b8", "b9", "b10", "b11",
1948 "b12", "b13", "b14", "b15",
1949 "b16", "b17", "b18", "b19",
1950 "b20", "b21", "b22", "b23",
1951 "b24", "b25", "b26", "b27",
1952 "b28", "b29", "b30", "b31",
1953 };
1954
1955 regnum -= gdbarch_num_regs (gdbarch);
1956
1957 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1958 return q_name[regnum - AARCH64_Q0_REGNUM];
1959
1960 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1961 return d_name[regnum - AARCH64_D0_REGNUM];
1962
1963 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1964 return s_name[regnum - AARCH64_S0_REGNUM];
1965
1966 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1967 return h_name[regnum - AARCH64_H0_REGNUM];
1968
1969 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1970 return b_name[regnum - AARCH64_B0_REGNUM];
1971
1972 internal_error (__FILE__, __LINE__,
1973 _("aarch64_pseudo_register_name: bad register number %d"),
1974 regnum);
1975 }
1976
1977 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
1978
1979 static struct type *
1980 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1981 {
1982 regnum -= gdbarch_num_regs (gdbarch);
1983
1984 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1985 return aarch64_vnq_type (gdbarch);
1986
1987 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1988 return aarch64_vnd_type (gdbarch);
1989
1990 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1991 return aarch64_vns_type (gdbarch);
1992
1993 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1994 return aarch64_vnh_type (gdbarch);
1995
1996 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1997 return aarch64_vnb_type (gdbarch);
1998
1999 internal_error (__FILE__, __LINE__,
2000 _("aarch64_pseudo_register_type: bad register number %d"),
2001 regnum);
2002 }
2003
2004 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2005
2006 static int
2007 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2008 struct reggroup *group)
2009 {
2010 regnum -= gdbarch_num_regs (gdbarch);
2011
2012 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2013 return group == all_reggroup || group == vector_reggroup;
2014 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2015 return (group == all_reggroup || group == vector_reggroup
2016 || group == float_reggroup);
2017 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2018 return (group == all_reggroup || group == vector_reggroup
2019 || group == float_reggroup);
2020 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2021 return group == all_reggroup || group == vector_reggroup;
2022 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2023 return group == all_reggroup || group == vector_reggroup;
2024
2025 return group == all_reggroup;
2026 }
2027
2028 /* Implement the "pseudo_register_read_value" gdbarch method. */
2029
2030 static struct value *
2031 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2032 struct regcache *regcache,
2033 int regnum)
2034 {
2035 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2036 struct value *result_value;
2037 gdb_byte *buf;
2038
2039 result_value = allocate_value (register_type (gdbarch, regnum));
2040 VALUE_LVAL (result_value) = lval_register;
2041 VALUE_REGNUM (result_value) = regnum;
2042 buf = value_contents_raw (result_value);
2043
2044 regnum -= gdbarch_num_regs (gdbarch);
2045
2046 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2047 {
2048 enum register_status status;
2049 unsigned v_regnum;
2050
2051 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2052 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2053 if (status != REG_VALID)
2054 mark_value_bytes_unavailable (result_value, 0,
2055 TYPE_LENGTH (value_type (result_value)));
2056 else
2057 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2058 return result_value;
2059 }
2060
2061 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2062 {
2063 enum register_status status;
2064 unsigned v_regnum;
2065
2066 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2067 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2068 if (status != REG_VALID)
2069 mark_value_bytes_unavailable (result_value, 0,
2070 TYPE_LENGTH (value_type (result_value)));
2071 else
2072 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2073 return result_value;
2074 }
2075
2076 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2077 {
2078 enum register_status status;
2079 unsigned v_regnum;
2080
2081 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2082 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2083 if (status != REG_VALID)
2084 mark_value_bytes_unavailable (result_value, 0,
2085 TYPE_LENGTH (value_type (result_value)));
2086 else
2087 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2088 return result_value;
2089 }
2090
2091 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2092 {
2093 enum register_status status;
2094 unsigned v_regnum;
2095
2096 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2097 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2098 if (status != REG_VALID)
2099 mark_value_bytes_unavailable (result_value, 0,
2100 TYPE_LENGTH (value_type (result_value)));
2101 else
2102 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2103 return result_value;
2104 }
2105
2106 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2107 {
2108 enum register_status status;
2109 unsigned v_regnum;
2110
2111 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2112 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2113 if (status != REG_VALID)
2114 mark_value_bytes_unavailable (result_value, 0,
2115 TYPE_LENGTH (value_type (result_value)));
2116 else
2117 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2118 return result_value;
2119 }
2120
2121   gdb_assert_not_reached ("regnum out of bounds");
2122 }
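/* The Qn/Dn/Sn/Hn/Bn pseudos above all alias the low bytes of the same
   Vn raw register; only the copy length differs (16, 8, 4, 2 or 1
   bytes).  A minimal sketch of the shared mapping, with REGNUM already
   rebased as in the functions above (illustrative only):  */
#if 0
static unsigned
aarch64_v_regnum_for (int rebased_regnum, int base_regnum)
{
  /* E.g. d5 -> V5 (low 8 bytes), b31 -> V31 (low byte).  */
  return AARCH64_V0_REGNUM + rebased_regnum - base_regnum;
}
#endif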
2123
2124 /* Implement the "pseudo_register_write" gdbarch method. */
2125
2126 static void
2127 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2128 int regnum, const gdb_byte *buf)
2129 {
2130 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2131
2132   /* Zero the register buffer first: we want GDB writes of the
2133      various 'scalar' pseudo registers to behave like architectural
2134      writes; register-width bytes are written and the remainder is
2135      set to zero.  */
2136 memset (reg_buf, 0, sizeof (reg_buf));
2137
2138 regnum -= gdbarch_num_regs (gdbarch);
2139
2140 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2141 {
2142       /* Pseudo Q registers.  */
2143 unsigned v_regnum;
2144
2145 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2146 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2147 regcache_raw_write (regcache, v_regnum, reg_buf);
2148 return;
2149 }
2150
2151 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2152 {
2153       /* Pseudo D registers.  */
2154 unsigned v_regnum;
2155
2156 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2157 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2158 regcache_raw_write (regcache, v_regnum, reg_buf);
2159 return;
2160 }
2161
2162 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2163 {
2164 unsigned v_regnum;
2165
2166 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2167 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2168 regcache_raw_write (regcache, v_regnum, reg_buf);
2169 return;
2170 }
2171
2172 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2173 {
2174       /* Pseudo H registers.  */
2175 unsigned v_regnum;
2176
2177 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2178 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2179 regcache_raw_write (regcache, v_regnum, reg_buf);
2180 return;
2181 }
2182
2183 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2184 {
2185       /* Pseudo B registers.  */
2186 unsigned v_regnum;
2187
2188 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2189 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2190 regcache_raw_write (regcache, v_regnum, reg_buf);
2191 return;
2192 }
2193
2194   gdb_assert_not_reached ("regnum out of bounds");
2195 }
2196
2197 /* Callback function for user_reg_add. */
2198
2199 static struct value *
2200 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2201 {
2202 const int *reg_p = (const int *) baton;
2203
2204 return value_of_register (*reg_p, frame);
2205 }
2206 \f
2207
2208 /* Implement the "software_single_step" gdbarch method, needed to
2209 single step through atomic sequences on AArch64. */
2210
2211 static int
2212 aarch64_software_single_step (struct frame_info *frame)
2213 {
2214 struct gdbarch *gdbarch = get_frame_arch (frame);
2215 struct address_space *aspace = get_frame_address_space (frame);
2216 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2217 const int insn_size = 4;
2218 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2219 CORE_ADDR pc = get_frame_pc (frame);
2220 CORE_ADDR breaks[2] = { -1, -1 };
2221 CORE_ADDR loc = pc;
2222 CORE_ADDR closing_insn = 0;
2223 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2224 byte_order_for_code);
2225 int index;
2226 int insn_count;
2227 int bc_insn_count = 0; /* Conditional branch instruction count. */
2228 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2229 aarch64_inst inst;
2230
2231 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2232 return 0;
2233
2234 /* Look for a Load Exclusive instruction which begins the sequence. */
2235 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2236 return 0;
2237
2238 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2239 {
2240 loc += insn_size;
2241 insn = read_memory_unsigned_integer (loc, insn_size,
2242 byte_order_for_code);
2243
2244 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2245 return 0;
2246 /* Check if the instruction is a conditional branch. */
2247 if (inst.opcode->iclass == condbranch)
2248 {
2249 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2250
2251 if (bc_insn_count >= 1)
2252 return 0;
2253
2254 /* It is, so we'll try to set a breakpoint at the destination. */
2255 breaks[1] = loc + inst.operands[0].imm.value;
2256
2257 bc_insn_count++;
2258 last_breakpoint++;
2259 }
2260
2261 /* Look for the Store Exclusive which closes the atomic sequence. */
2262 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2263 {
2264 closing_insn = loc;
2265 break;
2266 }
2267 }
2268
2269   /* We didn't find a closing Store Exclusive instruction; fall back.  */
2270 if (!closing_insn)
2271 return 0;
2272
2273 /* Insert breakpoint after the end of the atomic sequence. */
2274 breaks[0] = loc + insn_size;
2275
2276 /* Check for duplicated breakpoints, and also check that the second
2277 breakpoint is not within the atomic sequence. */
2278 if (last_breakpoint
2279 && (breaks[1] == breaks[0]
2280 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2281 last_breakpoint = 0;
2282
2283 /* Insert the breakpoint at the end of the sequence, and one at the
2284 destination of the conditional branch, if it exists. */
2285 for (index = 0; index <= last_breakpoint; index++)
2286 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2287
2288 return 1;
2289 }
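/* For reference, a typical sequence this function steps over (a sketch
   of compiler output for a compare-and-swap, not taken from any
   particular binary):

     retry:
       ldaxr  w1, [x0]		; load exclusive: the sequence begins
       cmp    w1, w2
       b.ne   out		; conditional branch: breaks[1] candidate
       stlxr  w3, w4, [x0]	; store exclusive: closing_insn
       cbnz   w3, retry
     out:			; breaks[0] lands here, after the sequence

   A breakpoint inside the sequence would make the store exclusive fail
   on every attempt, hence both breakpoints are placed outside it.  */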
2290
2291 struct displaced_step_closure
2292 {
2293   /* Nonzero when a conditional instruction, such as B.COND, CBZ or
2294      TBZ, is being displaced stepped.  */
2295 int cond;
2296
2297 /* PC adjustment offset after displaced stepping. */
2298 int32_t pc_adjust;
2299 };
2300
2301 /* Data when visiting instructions for displaced stepping. */
2302
2303 struct aarch64_displaced_step_data
2304 {
2305 struct aarch64_insn_data base;
2306
2307   /* The address at which the instruction will be executed.  */
2308 CORE_ADDR new_addr;
2309 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2310 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2311 /* Number of instructions in INSN_BUF. */
2312 unsigned insn_count;
2313 /* Registers when doing displaced stepping. */
2314 struct regcache *regs;
2315
2316 struct displaced_step_closure *dsc;
2317 };
2318
2319 /* Implementation of aarch64_insn_visitor method "b". */
2320
2321 static void
2322 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2323 struct aarch64_insn_data *data)
2324 {
2325 struct aarch64_displaced_step_data *dsd
2326 = (struct aarch64_displaced_step_data *) data;
2327 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2328
2329 if (can_encode_int32 (new_offset, 28))
2330 {
2331 /* Emit B rather than BL, because executing BL on a new address
2332 will get the wrong address into LR. In order to avoid this,
2333 we emit B, and update LR if the instruction is BL. */
2334 emit_b (dsd->insn_buf, 0, new_offset);
2335 dsd->insn_count++;
2336 }
2337 else
2338 {
2339 /* Write NOP. */
2340 emit_nop (dsd->insn_buf);
2341 dsd->insn_count++;
2342 dsd->dsc->pc_adjust = offset;
2343 }
2344
2345 if (is_bl)
2346 {
2347 /* Update LR. */
2348 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2349 data->insn_addr + 4);
2350 }
2351 }
2352
2353 /* Implementation of aarch64_insn_visitor method "b_cond". */
2354
2355 static void
2356 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2357 struct aarch64_insn_data *data)
2358 {
2359 struct aarch64_displaced_step_data *dsd
2360 = (struct aarch64_displaced_step_data *) data;
2361
2362   /* GDB has to fix up the PC after displaced stepping this instruction
2363      differently, depending on whether the condition is true or false.
2364      Instead of checking COND against the condition flags, we emit the
2365      following instructions, and the fixup phase can tell which path
2366      was taken from the resulting PC value.
2367
2368 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2369 INSN1 ;
2370 TAKEN:
2371 INSN2
2372 */
2373
2374 emit_bcond (dsd->insn_buf, cond, 8);
2375 dsd->dsc->cond = 1;
2376 dsd->dsc->pc_adjust = offset;
2377 dsd->insn_count = 1;
2378 }
2379
2380 /* Dynamically build an aarch64_register operand.  If the register
2381    were known statically, a constant initializer could be used
2382    instead of this helper function.  */
2383
2384 static struct aarch64_register
2385 aarch64_register (unsigned num, int is64)
2386 {
2387 return (struct aarch64_register) { num, is64 };
2388 }
2389
2390 /* Implementation of aarch64_insn_visitor method "cb". */
2391
2392 static void
2393 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2394 const unsigned rn, int is64,
2395 struct aarch64_insn_data *data)
2396 {
2397 struct aarch64_displaced_step_data *dsd
2398 = (struct aarch64_displaced_step_data *) data;
2399
2400 /* The offset is out of range for a compare and branch
2401 instruction. We can use the following instructions instead:
2402
2403 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2404 INSN1 ;
2405 TAKEN:
2406 INSN2
2407 */
2408 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2409 dsd->insn_count = 1;
2410 dsd->dsc->cond = 1;
2411 dsd->dsc->pc_adjust = offset;
2412 }
2413
2414 /* Implementation of aarch64_insn_visitor method "tb". */
2415
2416 static void
2417 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2418 const unsigned rt, unsigned bit,
2419 struct aarch64_insn_data *data)
2420 {
2421 struct aarch64_displaced_step_data *dsd
2422 = (struct aarch64_displaced_step_data *) data;
2423
2424   /* The offset is out of range for a test bit and branch
2425      instruction.  We can use the following instructions instead:
2426
2427 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2428 INSN1 ;
2429 TAKEN:
2430 INSN2
2431
2432 */
2433 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2434 dsd->insn_count = 1;
2435 dsd->dsc->cond = 1;
2436 dsd->dsc->pc_adjust = offset;
2437 }
2438
2439 /* Implementation of aarch64_insn_visitor method "adr". */
2440
2441 static void
2442 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2443 const int is_adrp, struct aarch64_insn_data *data)
2444 {
2445 struct aarch64_displaced_step_data *dsd
2446 = (struct aarch64_displaced_step_data *) data;
2447 /* We know exactly the address the ADR{P,} instruction will compute.
2448 We can just write it to the destination register. */
2449 CORE_ADDR address = data->insn_addr + offset;
2450
2451 if (is_adrp)
2452 {
2453 /* Clear the lower 12 bits of the offset to get the 4K page. */
2454 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2455 address & ~0xfff);
2456 }
2457 else
2458 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2459 address);
2460
2461 dsd->dsc->pc_adjust = 4;
2462 emit_nop (dsd->insn_buf);
2463 dsd->insn_count = 1;
2464 }
2465
2466 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2467
2468 static void
2469 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2470 const unsigned rt, const int is64,
2471 struct aarch64_insn_data *data)
2472 {
2473 struct aarch64_displaced_step_data *dsd
2474 = (struct aarch64_displaced_step_data *) data;
2475 CORE_ADDR address = data->insn_addr + offset;
2476 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2477
2478 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2479 address);
2480
2481 if (is_sw)
2482 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2483 aarch64_register (rt, 1), zero);
2484 else
2485 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2486 aarch64_register (rt, 1), zero);
2487
2488 dsd->dsc->pc_adjust = 4;
2489 }
2490
2491 /* Implementation of aarch64_insn_visitor method "others". */
2492
2493 static void
2494 aarch64_displaced_step_others (const uint32_t insn,
2495 struct aarch64_insn_data *data)
2496 {
2497 struct aarch64_displaced_step_data *dsd
2498 = (struct aarch64_displaced_step_data *) data;
2499
2500 aarch64_emit_insn (dsd->insn_buf, insn);
2501 dsd->insn_count = 1;
2502
2503 if ((insn & 0xfffffc1f) == 0xd65f0000)
2504 {
2505 /* RET */
2506 dsd->dsc->pc_adjust = 0;
2507 }
2508 else
2509 dsd->dsc->pc_adjust = 4;
2510 }
2511
2512 static const struct aarch64_insn_visitor visitor =
2513 {
2514 aarch64_displaced_step_b,
2515 aarch64_displaced_step_b_cond,
2516 aarch64_displaced_step_cb,
2517 aarch64_displaced_step_tb,
2518 aarch64_displaced_step_adr,
2519 aarch64_displaced_step_ldr_literal,
2520 aarch64_displaced_step_others,
2521 };
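/* aarch64_relocate_instruction (arch/aarch64-insn.c) decodes the
   instruction being stepped and calls back into exactly one of the
   visitor methods above, which emits the relocated instruction(s) into
   dsd->insn_buf and records how the PC must be adjusted afterwards.  */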
2522
2523 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2524
2525 struct displaced_step_closure *
2526 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2527 CORE_ADDR from, CORE_ADDR to,
2528 struct regcache *regs)
2529 {
2530 struct displaced_step_closure *dsc = NULL;
2531 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2532 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2533 struct aarch64_displaced_step_data dsd;
2534 aarch64_inst inst;
2535
2536 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2537 return NULL;
2538
2539 /* Look for a Load Exclusive instruction which begins the sequence. */
2540 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2541 {
2542       /* We can't displaced-step atomic sequences.  */
2543 return NULL;
2544 }
2545
2546 dsc = XCNEW (struct displaced_step_closure);
2547 dsd.base.insn_addr = from;
2548 dsd.new_addr = to;
2549 dsd.regs = regs;
2550 dsd.dsc = dsc;
2551 dsd.insn_count = 0;
2552 aarch64_relocate_instruction (insn, &visitor,
2553 (struct aarch64_insn_data *) &dsd);
2554 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2555
2556 if (dsd.insn_count != 0)
2557 {
2558 int i;
2559
2560 /* Instruction can be relocated to scratch pad. Copy
2561 relocated instruction(s) there. */
2562 for (i = 0; i < dsd.insn_count; i++)
2563 {
2564 if (debug_displaced)
2565 {
2566 debug_printf ("displaced: writing insn ");
2567 debug_printf ("%.8x", dsd.insn_buf[i]);
2568 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2569 }
2570 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2571 (ULONGEST) dsd.insn_buf[i]);
2572 }
2573 }
2574 else
2575 {
2576 xfree (dsc);
2577 dsc = NULL;
2578 }
2579
2580 return dsc;
2581 }
2582
2583 /* Implement the "displaced_step_fixup" gdbarch method. */
2584
2585 void
2586 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2587 struct displaced_step_closure *dsc,
2588 CORE_ADDR from, CORE_ADDR to,
2589 struct regcache *regs)
2590 {
2591 if (dsc->cond)
2592 {
2593 ULONGEST pc;
2594
2595 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2596 if (pc - to == 8)
2597 {
2598 /* Condition is true. */
2599 }
2600 else if (pc - to == 4)
2601 {
2602 /* Condition is false. */
2603 dsc->pc_adjust = 4;
2604 }
2605 else
2606 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2607 }
2608
2609 if (dsc->pc_adjust != 0)
2610 {
2611 if (debug_displaced)
2612 {
2613 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2614 paddress (gdbarch, from), dsc->pc_adjust);
2615 }
2616 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2617 from + dsc->pc_adjust);
2618 }
2619 }
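/* A sketch of the two landing spots tested above, for a conditional
   instruction relocated to scratch address TO:

     TO + 0:  B.COND <TO+8>	; taken     -> PC stops at TO + 8
     TO + 4:  (next slot)	; not taken -> PC stops at TO + 4

   In the taken case PC_ADJUST still holds the original branch offset;
   in the not-taken case it is rewritten to 4 so execution resumes at
   the instruction following the original one.  */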
2620
2621 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2622
2623 int
2624 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2625 struct displaced_step_closure *closure)
2626 {
2627 return 1;
2628 }
2629
2630 /* Initialize the current architecture based on INFO. If possible,
2631 re-use an architecture from ARCHES, which is a list of
2632 architectures already created during this debugging session.
2633
2634 Called e.g. at program startup, when reading a core file, and when
2635 reading a binary file. */
2636
2637 static struct gdbarch *
2638 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2639 {
2640 struct gdbarch_tdep *tdep;
2641 struct gdbarch *gdbarch;
2642 struct gdbarch_list *best_arch;
2643 struct tdesc_arch_data *tdesc_data = NULL;
2644 const struct target_desc *tdesc = info.target_desc;
2645 int i;
2646 int valid_p = 1;
2647 const struct tdesc_feature *feature;
2648 int num_regs = 0;
2649 int num_pseudo_regs = 0;
2650
2651 /* Ensure we always have a target descriptor. */
2652 if (!tdesc_has_registers (tdesc))
2653 tdesc = tdesc_aarch64;
2654
2655 gdb_assert (tdesc);
2656
2657 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2658
2659 if (feature == NULL)
2660 return NULL;
2661
2662 tdesc_data = tdesc_data_alloc ();
2663
2664 /* Validate the descriptor provides the mandatory core R registers
2665 and allocate their numbers. */
2666 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2667 valid_p &=
2668 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2669 aarch64_r_register_names[i]);
2670
2671 num_regs = AARCH64_X0_REGNUM + i;
2672
2673 /* Look for the V registers. */
2674 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2675 if (feature)
2676 {
2677 /* Validate the descriptor provides the mandatory V registers
2678 and allocate their numbers. */
2679 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2680 valid_p &=
2681 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2682 aarch64_v_register_names[i]);
2683
2684 num_regs = AARCH64_V0_REGNUM + i;
2685
2686       num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos.  */
2687       num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos.  */
2688       num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos.  */
2689       num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos.  */
2690       num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos.  */
2691 }
2692
2693 if (!valid_p)
2694 {
2695 tdesc_data_cleanup (tdesc_data);
2696 return NULL;
2697 }
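  /* For reference, the shape of a matching core feature in a target
     description XML (an abridged sketch; the canonical file is
     features/aarch64-core.xml in this tree):

       <feature name="org.gnu.gdb.aarch64.core">
         <reg name="x0" bitsize="64"/>
         ...
         <reg name="sp" bitsize="64" type="data_ptr"/>
         <reg name="pc" bitsize="64" type="code_ptr"/>
       </feature>  */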
2698
2699 /* AArch64 code is always little-endian. */
2700 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2701
2702 /* If there is already a candidate, use it. */
2703 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2704 best_arch != NULL;
2705 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2706 {
2707 /* Found a match. */
2708 break;
2709 }
2710
2711 if (best_arch != NULL)
2712 {
2713 if (tdesc_data != NULL)
2714 tdesc_data_cleanup (tdesc_data);
2715 return best_arch->gdbarch;
2716 }
2717
2718 tdep = XCNEW (struct gdbarch_tdep);
2719 gdbarch = gdbarch_alloc (&info, tdep);
2720
2721 /* This should be low enough for everything. */
2722 tdep->lowest_pc = 0x20;
2723 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2724 tdep->jb_elt_size = 8;
2725
2726 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2727 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2728
2729 /* Frame handling. */
2730 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2731 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2732 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2733
2734 /* Advance PC across function entry code. */
2735 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2736
2737 /* The stack grows downward. */
2738 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2739
2740 /* Breakpoint manipulation. */
2741 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2742 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2743 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2744
2745 /* Information about registers, etc. */
2746 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2747 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2748 set_gdbarch_num_regs (gdbarch, num_regs);
2749
2750 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2751 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2752 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2753 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2754 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2755 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2756 aarch64_pseudo_register_reggroup_p);
2757
2758 /* ABI */
2759 set_gdbarch_short_bit (gdbarch, 16);
2760 set_gdbarch_int_bit (gdbarch, 32);
2761 set_gdbarch_float_bit (gdbarch, 32);
2762 set_gdbarch_double_bit (gdbarch, 64);
2763 set_gdbarch_long_double_bit (gdbarch, 128);
2764 set_gdbarch_long_bit (gdbarch, 64);
2765 set_gdbarch_long_long_bit (gdbarch, 64);
2766 set_gdbarch_ptr_bit (gdbarch, 64);
2767 set_gdbarch_char_signed (gdbarch, 0);
2768 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2769 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2770 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2771
2772 /* Internal <-> external register number maps. */
2773 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2774
2775 /* Returning results. */
2776 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2777
2778 /* Disassembly. */
2779 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2780
2781 /* Virtual tables. */
2782 set_gdbarch_vbit_in_delta (gdbarch, 1);
2783
2784 /* Hook in the ABI-specific overrides, if they have been registered. */
2785 info.target_desc = tdesc;
2786 info.tdep_info = (void *) tdesc_data;
2787 gdbarch_init_osabi (info, gdbarch);
2788
2789 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2790
2791 /* Add some default predicates. */
2792 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2793 dwarf2_append_unwinders (gdbarch);
2794 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2795
2796 frame_base_set_default (gdbarch, &aarch64_normal_base);
2797
2798   /* Now that we have tuned the configuration, set a few final things
2799      based on what the OS ABI has told us.  */
2800
2801 if (tdep->jb_pc >= 0)
2802 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2803
2804 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2805
2806 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2807
2808 /* Add standard register aliases. */
2809 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2810 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2811 value_of_aarch64_user_reg,
2812 &aarch64_register_aliases[i].regnum);
2813
2814 return gdbarch;
2815 }
2816
2817 static void
2818 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2819 {
2820 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2821
2822 if (tdep == NULL)
2823 return;
2824
2825 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2826 paddress (gdbarch, tdep->lowest_pc));
2827 }
2828
2829 /* Suppress warning from -Wmissing-prototypes. */
2830 extern initialize_file_ftype _initialize_aarch64_tdep;
2831
2832 void
2833 _initialize_aarch64_tdep (void)
2834 {
2835 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2836 aarch64_dump_tdep);
2837
2838 initialize_tdesc_aarch64 ();
2839
2840 /* Debug this file's internals. */
2841 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2842 Set AArch64 debugging."), _("\
2843 Show AArch64 debugging."), _("\
2844 When on, AArch64 specific debugging is enabled."),
2845 NULL,
2846 show_aarch64_debug,
2847 &setdebuglist, &showdebuglist);
2848 }
2849
2850 /* AArch64 process record-replay related structures, defines etc. */
2851
2852 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2853 do \
2854 { \
2855 unsigned int reg_len = LENGTH; \
2856 if (reg_len) \
2857 { \
2858 REGS = XNEWVEC (uint32_t, reg_len); \
2859 	        memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * reg_len); \
2860 } \
2861 } \
2862 while (0)
2863
2864 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2865 do \
2866 { \
2867 unsigned int mem_len = LENGTH; \
2868 if (mem_len) \
2869 { \
2870 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2871 	        memcpy (&MEMS[0], &RECORD_BUF[0], \
2872 	                sizeof (struct aarch64_mem_r) * mem_len); \
2873 } \
2874 } \
2875 while (0)
2876
2877 /* AArch64 record/replay structures and enumerations. */
2878
2879 struct aarch64_mem_r
2880 {
2881 uint64_t len; /* Record length. */
2882 uint64_t addr; /* Memory address. */
2883 };
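/* A minimal usage sketch of MEM_ALLOC above (illustrative only): the
   record handlers fill RECORD_BUF_MEM with (length, address) word
   pairs whose layout matches struct aarch64_mem_r.  */
#if 0
uint64_t record_buf_mem[2];
record_buf_mem[0] = 8;		/* struct aarch64_mem_r.len   */
record_buf_mem[1] = address;	/* struct aarch64_mem_r.addr  */
aarch64_insn_r->mem_rec_count = 1;
MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	   record_buf_mem);
#endif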
2884
2885 enum aarch64_record_result
2886 {
2887 AARCH64_RECORD_SUCCESS,
2888 AARCH64_RECORD_FAILURE,
2889 AARCH64_RECORD_UNSUPPORTED,
2890 AARCH64_RECORD_UNKNOWN
2891 };
2892
2893 typedef struct insn_decode_record_t
2894 {
2895 struct gdbarch *gdbarch;
2896 struct regcache *regcache;
2897 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2898 uint32_t aarch64_insn; /* Insn to be recorded. */
2899 uint32_t mem_rec_count; /* Count of memory records. */
2900 uint32_t reg_rec_count; /* Count of register records. */
2901 uint32_t *aarch64_regs; /* Registers to be recorded. */
2902 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2903 } insn_decode_record;
2904
2905 /* Record handler for data processing - register instructions. */
2906
2907 static unsigned int
2908 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2909 {
2910 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2911 uint32_t record_buf[4];
2912
2913 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2914 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2915 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2916
2917 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2918 {
2919 uint8_t setflags;
2920
2921 /* Logical (shifted register). */
2922 if (insn_bits24_27 == 0x0a)
2923 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2924 /* Add/subtract. */
2925 else if (insn_bits24_27 == 0x0b)
2926 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2927 else
2928 return AARCH64_RECORD_UNKNOWN;
2929
2930 record_buf[0] = reg_rd;
2931 aarch64_insn_r->reg_rec_count = 1;
2932 if (setflags)
2933 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2934 }
2935 else
2936 {
2937 if (insn_bits24_27 == 0x0b)
2938 {
2939 /* Data-processing (3 source). */
2940 record_buf[0] = reg_rd;
2941 aarch64_insn_r->reg_rec_count = 1;
2942 }
2943 else if (insn_bits24_27 == 0x0a)
2944 {
2945 if (insn_bits21_23 == 0x00)
2946 {
2947 /* Add/subtract (with carry). */
2948 record_buf[0] = reg_rd;
2949 aarch64_insn_r->reg_rec_count = 1;
2950 if (bit (aarch64_insn_r->aarch64_insn, 29))
2951 {
2952 record_buf[1] = AARCH64_CPSR_REGNUM;
2953 aarch64_insn_r->reg_rec_count = 2;
2954 }
2955 }
2956 else if (insn_bits21_23 == 0x02)
2957 {
2958 /* Conditional compare (register) and conditional compare
2959 (immediate) instructions. */
2960 record_buf[0] = AARCH64_CPSR_REGNUM;
2961 aarch64_insn_r->reg_rec_count = 1;
2962 }
2963 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2964 {
2965 	      /* Conditional select. */
2966 /* Data-processing (2 source). */
2967 /* Data-processing (1 source). */
2968 record_buf[0] = reg_rd;
2969 aarch64_insn_r->reg_rec_count = 1;
2970 }
2971 else
2972 return AARCH64_RECORD_UNKNOWN;
2973 }
2974 }
2975
2976 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2977 record_buf);
2978 return AARCH64_RECORD_SUCCESS;
2979 }
2980
2981 /* Record handler for data processing - immediate instructions. */
2982
2983 static unsigned int
2984 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2985 {
2986 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
2987 uint32_t record_buf[4];
2988
2989 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2990 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2991 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2992
2993 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2994 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2995 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2996 {
2997 record_buf[0] = reg_rd;
2998 aarch64_insn_r->reg_rec_count = 1;
2999 }
3000 else if (insn_bits24_27 == 0x01)
3001 {
3002 /* Add/Subtract (immediate). */
3003 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3004 record_buf[0] = reg_rd;
3005 aarch64_insn_r->reg_rec_count = 1;
3006 if (setflags)
3007 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3008 }
3009 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3010 {
3011 /* Logical (immediate). */
3012 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3013 record_buf[0] = reg_rd;
3014 aarch64_insn_r->reg_rec_count = 1;
3015 if (setflags)
3016 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3017 }
3018 else
3019 return AARCH64_RECORD_UNKNOWN;
3020
3021 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3022 record_buf);
3023 return AARCH64_RECORD_SUCCESS;
3024 }
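/* A worked example of the field slicing above, for the instruction
   ADD X1, X2, #4 (encoding 0x91001041; values chosen for
   illustration):

     bits (insn, 0, 4)    -> 0x01   Rd
     bits (insn, 5, 9)    -> 0x02   Rn
     bits (insn, 10, 21)  -> 0x004  imm12
     bits (insn, 24, 27)  -> 0x01   add/subtract (immediate) class
     bit  (insn, 29)      -> 0      S clear, so CPSR is not recorded

   Only X1 ends up in record_buf.  */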
3025
3026 /* Record handler for branch, exception generation and system instructions. */
3027
3028 static unsigned int
3029 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3030 {
3031 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3032 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3033 uint32_t record_buf[4];
3034
3035 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3036 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3037 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3038
3039 if (insn_bits28_31 == 0x0d)
3040 {
3041 /* Exception generation instructions. */
3042 if (insn_bits24_27 == 0x04)
3043 {
3044 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3045 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3046 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3047 {
3048 ULONGEST svc_number;
3049
3050 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3051 &svc_number);
3052 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3053 svc_number);
3054 }
3055 else
3056 return AARCH64_RECORD_UNSUPPORTED;
3057 }
3058 /* System instructions. */
3059 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3060 {
3061 uint32_t reg_rt, reg_crn;
3062
3063 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3064 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3065
3066 /* Record rt in case of sysl and mrs instructions. */
3067 if (bit (aarch64_insn_r->aarch64_insn, 21))
3068 {
3069 record_buf[0] = reg_rt;
3070 aarch64_insn_r->reg_rec_count = 1;
3071 }
3072 /* Record cpsr for hint and msr(immediate) instructions. */
3073 else if (reg_crn == 0x02 || reg_crn == 0x04)
3074 {
3075 record_buf[0] = AARCH64_CPSR_REGNUM;
3076 aarch64_insn_r->reg_rec_count = 1;
3077 }
3078 }
3079 /* Unconditional branch (register). */
3080       else if ((insn_bits24_27 & 0x0e) == 0x06)
3081 {
3082 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3083 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3084 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3085 }
3086 else
3087 return AARCH64_RECORD_UNKNOWN;
3088 }
3089 /* Unconditional branch (immediate). */
3090 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3091 {
3092 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3093 if (bit (aarch64_insn_r->aarch64_insn, 31))
3094 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3095 }
3096 else
3097 /* Compare & branch (immediate), Test & branch (immediate) and
3098 Conditional branch (immediate). */
3099 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3100
3101 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3102 record_buf);
3103 return AARCH64_RECORD_SUCCESS;
3104 }
3105
3106 /* Record handler for advanced SIMD load and store instructions. */
3107
3108 static unsigned int
3109 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3110 {
3111 CORE_ADDR address;
3112 uint64_t addr_offset = 0;
3113 uint32_t record_buf[24];
3114 uint64_t record_buf_mem[24];
3115 uint32_t reg_rn, reg_rt;
3116 uint32_t reg_index = 0, mem_index = 0;
3117 uint8_t opcode_bits, size_bits;
3118
3119 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3120 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3121 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3122 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3123 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3124
3125 if (record_debug)
3126 debug_printf ("Process record: Advanced SIMD load/store\n");
3127
3128 /* Load/store single structure. */
3129 if (bit (aarch64_insn_r->aarch64_insn, 24))
3130 {
3131 uint8_t sindex, scale, selem, esize, replicate = 0;
3132 scale = opcode_bits >> 2;
3133       selem = ((opcode_bits & 0x02)
3134                | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3135 switch (scale)
3136 {
3137 case 1:
3138 if (size_bits & 0x01)
3139 return AARCH64_RECORD_UNKNOWN;
3140 break;
3141 case 2:
3142 if ((size_bits >> 1) & 0x01)
3143 return AARCH64_RECORD_UNKNOWN;
3144 if (size_bits & 0x01)
3145 {
3146 if (!((opcode_bits >> 1) & 0x01))
3147 scale = 3;
3148 else
3149 return AARCH64_RECORD_UNKNOWN;
3150 }
3151 break;
3152 case 3:
3153 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3154 {
3155 scale = size_bits;
3156 replicate = 1;
3157 break;
3158 }
3159 else
3160 return AARCH64_RECORD_UNKNOWN;
3161 default:
3162 break;
3163 }
3164 esize = 8 << scale;
3165 if (replicate)
3166 for (sindex = 0; sindex < selem; sindex++)
3167 {
3168 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3169 reg_rt = (reg_rt + 1) % 32;
3170 }
3171 else
3172 {
3173 for (sindex = 0; sindex < selem; sindex++)
3174 {
3175 if (bit (aarch64_insn_r->aarch64_insn, 22))
3176 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3177 else
3178 {
3179 record_buf_mem[mem_index++] = esize / 8;
3180 record_buf_mem[mem_index++] = address + addr_offset;
3181 }
3182 addr_offset = addr_offset + (esize / 8);
3183 reg_rt = (reg_rt + 1) % 32;
3184 }
3185 }
3186 }
3187 /* Load/store multiple structure. */
3188 else
3189 {
3190 uint8_t selem, esize, rpt, elements;
3191 uint8_t eindex, rindex;
3192
3193 esize = 8 << size_bits;
3194 if (bit (aarch64_insn_r->aarch64_insn, 30))
3195 elements = 128 / esize;
3196 else
3197 elements = 64 / esize;
3198
3199 switch (opcode_bits)
3200 {
3201 	  /* LD/ST4 (4 Registers). */
3202 case 0:
3203 rpt = 1;
3204 selem = 4;
3205 break;
3206 	  /* LD/ST1 (4 Registers). */
3207 case 2:
3208 rpt = 4;
3209 selem = 1;
3210 break;
3211 	  /* LD/ST3 (3 Registers). */
3212 case 4:
3213 rpt = 1;
3214 selem = 3;
3215 break;
3216 	  /* LD/ST1 (3 Registers). */
3217 case 6:
3218 rpt = 3;
3219 selem = 1;
3220 break;
3221 	  /* LD/ST1 (1 Register). */
3222 case 7:
3223 rpt = 1;
3224 selem = 1;
3225 break;
3226 	  /* LD/ST2 (2 Registers). */
3227 case 8:
3228 rpt = 1;
3229 selem = 2;
3230 break;
3231 	  /* LD/ST1 (2 Registers). */
3232 case 10:
3233 rpt = 2;
3234 selem = 1;
3235 break;
3236 default:
3237 return AARCH64_RECORD_UNSUPPORTED;
3238 break;
3239 }
3240 for (rindex = 0; rindex < rpt; rindex++)
3241 for (eindex = 0; eindex < elements; eindex++)
3242 {
3243 uint8_t reg_tt, sindex;
3244 reg_tt = (reg_rt + rindex) % 32;
3245 for (sindex = 0; sindex < selem; sindex++)
3246 {
3247 if (bit (aarch64_insn_r->aarch64_insn, 22))
3248 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3249 else
3250 {
3251 record_buf_mem[mem_index++] = esize / 8;
3252 record_buf_mem[mem_index++] = address + addr_offset;
3253 }
3254 addr_offset = addr_offset + (esize / 8);
3255 reg_tt = (reg_tt + 1) % 32;
3256 }
3257 }
3258 }
3259
3260 if (bit (aarch64_insn_r->aarch64_insn, 23))
3261 record_buf[reg_index++] = reg_rn;
3262
3263 aarch64_insn_r->reg_rec_count = reg_index;
3264 aarch64_insn_r->mem_rec_count = mem_index / 2;
3265 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3266 record_buf_mem);
3267 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3268 record_buf);
3269 return AARCH64_RECORD_SUCCESS;
3270 }
3271
3272 /* Record handler for load and store instructions. */
3273
3274 static unsigned int
3275 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3276 {
3277 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3278 uint8_t insn_bit23, insn_bit21;
3279 uint8_t opc, size_bits, ld_flag, vector_flag;
3280 uint32_t reg_rn, reg_rt, reg_rt2;
3281 uint64_t datasize, offset;
3282 uint32_t record_buf[8];
3283 uint64_t record_buf_mem[8];
3284 CORE_ADDR address;
3285
3286 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3287 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3288 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3289 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3290 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3291 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3292 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3293 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3294 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3295 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3296 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3297
3298 /* Load/store exclusive. */
3299 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3300 {
3301 if (record_debug)
3302 debug_printf ("Process record: load/store exclusive\n");
3303
3304 if (ld_flag)
3305 {
3306 record_buf[0] = reg_rt;
3307 aarch64_insn_r->reg_rec_count = 1;
3308 if (insn_bit21)
3309 {
3310 record_buf[1] = reg_rt2;
3311 aarch64_insn_r->reg_rec_count = 2;
3312 }
3313 }
3314 else
3315 {
3316 if (insn_bit21)
3317 datasize = (8 << size_bits) * 2;
3318 else
3319 datasize = (8 << size_bits);
3320 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3321 &address);
3322 record_buf_mem[0] = datasize / 8;
3323 record_buf_mem[1] = address;
3324 aarch64_insn_r->mem_rec_count = 1;
3325 if (!insn_bit23)
3326 {
3327 /* Save register rs. */
3328 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3329 aarch64_insn_r->reg_rec_count = 1;
3330 }
3331 }
3332 }
3333 /* Load register (literal) instructions decoding. */
3334 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3335 {
3336 if (record_debug)
3337 debug_printf ("Process record: load register (literal)\n");
3338 if (vector_flag)
3339 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3340 else
3341 record_buf[0] = reg_rt;
3342 aarch64_insn_r->reg_rec_count = 1;
3343 }
3344 /* All types of load/store pair instructions decoding. */
3345 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3346 {
3347 if (record_debug)
3348 debug_printf ("Process record: load/store pair\n");
3349
3350 if (ld_flag)
3351 {
3352 if (vector_flag)
3353 {
3354 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3355 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3356 }
3357 else
3358 {
3359 record_buf[0] = reg_rt;
3360 record_buf[1] = reg_rt2;
3361 }
3362 aarch64_insn_r->reg_rec_count = 2;
3363 }
3364 else
3365 {
3366 uint16_t imm7_off;
3367 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3368 if (!vector_flag)
3369 size_bits = size_bits >> 1;
3370 datasize = 8 << (2 + size_bits);
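	  /* IMM7_OFF is a 7-bit two's-complement scaled offset: take its
	     magnitude here; the sign bit (0x40) selects add or subtract
	     when ADDRESS is adjusted below.  */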
3371 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3372 offset = offset << (2 + size_bits);
3373 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3374 &address);
3375 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3376 {
3377 if (imm7_off & 0x40)
3378 address = address - offset;
3379 else
3380 address = address + offset;
3381 }
3382
3383 record_buf_mem[0] = datasize / 8;
3384 record_buf_mem[1] = address;
3385 record_buf_mem[2] = datasize / 8;
3386 record_buf_mem[3] = address + (datasize / 8);
3387 aarch64_insn_r->mem_rec_count = 2;
3388 }
3389 if (bit (aarch64_insn_r->aarch64_insn, 23))
3390 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3391 }
3392 /* Load/store register (unsigned immediate) instructions. */
3393 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3394 {
3395 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3396 if (!(opc >> 1))
3397 if (opc & 0x01)
3398 ld_flag = 0x01;
3399 else
3400 ld_flag = 0x0;
3401 else
3402 if (size_bits != 0x03)
3403 ld_flag = 0x01;
3404 else
3405 return AARCH64_RECORD_UNKNOWN;
3406
3407 if (record_debug)
3408 {
3409 debug_printf ("Process record: load/store (unsigned immediate):"
3410 " size %x V %d opc %x\n", size_bits, vector_flag,
3411 opc);
3412 }
3413
3414 if (!ld_flag)
3415 {
3416 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3417 datasize = 8 << size_bits;
3418 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3419 &address);
3420 offset = offset << size_bits;
3421 address = address + offset;
3422
3423 record_buf_mem[0] = datasize >> 3;
3424 record_buf_mem[1] = address;
3425 aarch64_insn_r->mem_rec_count = 1;
3426 }
3427 else
3428 {
3429 if (vector_flag)
3430 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3431 else
3432 record_buf[0] = reg_rt;
3433 aarch64_insn_r->reg_rec_count = 1;
3434 }
3435 }
3436 /* Load/store register (register offset) instructions. */
3437 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3438 && insn_bits10_11 == 0x02 && insn_bit21)
3439 {
3440 if (record_debug)
3441 debug_printf ("Process record: load/store (register offset)\n");
3442 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3443 if (!(opc >> 1))
3444 if (opc & 0x01)
3445 ld_flag = 0x01;
3446 else
3447 ld_flag = 0x0;
3448 else
3449 if (size_bits != 0x03)
3450 ld_flag = 0x01;
3451 else
3452 return AARCH64_RECORD_UNKNOWN;
3453
3454 if (!ld_flag)
3455 {
3456 ULONGEST reg_rm_val;
3457
3458 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3459 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3460 if (bit (aarch64_insn_r->aarch64_insn, 12))
3461 offset = reg_rm_val << size_bits;
3462 else
3463 offset = reg_rm_val;
3464 datasize = 8 << size_bits;
3465 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3466 &address);
3467 address = address + offset;
3468 record_buf_mem[0] = datasize >> 3;
3469 record_buf_mem[1] = address;
3470 aarch64_insn_r->mem_rec_count = 1;
3471 }
3472 else
3473 {
3474 if (vector_flag)
3475 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3476 else
3477 record_buf[0] = reg_rt;
3478 aarch64_insn_r->reg_rec_count = 1;
3479 }
3480 }
3481 /* Load/store register (immediate and unprivileged) instructions. */
3482 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3483 && !insn_bit21)
3484 {
3485 if (record_debug)
3486 {
3487 debug_printf ("Process record: load/store "
3488 "(immediate and unprivileged)\n");
3489 }
3490 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3491 if (!(opc >> 1))
3492 if (opc & 0x01)
3493 ld_flag = 0x01;
3494 else
3495 ld_flag = 0x0;
3496 else
3497 if (size_bits != 0x03)
3498 ld_flag = 0x01;
3499 else
3500 return AARCH64_RECORD_UNKNOWN;
3501
3502 if (!ld_flag)
3503 {
3504 uint16_t imm9_off;
3505 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3506 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3507 datasize = 8 << size_bits;
3508 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3509 &address);
3510 if (insn_bits10_11 != 0x01)
3511 {
3512 if (imm9_off & 0x0100)
3513 address = address - offset;
3514 else
3515 address = address + offset;
3516 }
3517 record_buf_mem[0] = datasize >> 3;
3518 record_buf_mem[1] = address;
3519 aarch64_insn_r->mem_rec_count = 1;
3520 }
3521 else
3522 {
3523 if (vector_flag)
3524 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3525 else
3526 record_buf[0] = reg_rt;
3527 aarch64_insn_r->reg_rec_count = 1;
3528 }
3529 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3530 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3531 }
3532 /* Advanced SIMD load/store instructions. */
3533 else
3534 return aarch64_record_asimd_load_store (aarch64_insn_r);
3535
3536 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3537 record_buf_mem);
3538 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3539 record_buf);
3540 return AARCH64_RECORD_SUCCESS;
3541 }
3542
3543 /* Record handler for data processing SIMD and floating point instructions. */
3544
3545 static unsigned int
3546 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3547 {
3548 uint8_t insn_bit21, opcode, rmode, reg_rd;
3549 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3550 uint8_t insn_bits11_14;
3551 uint32_t record_buf[2];
3552
3553 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3554 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3555 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3556 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3557 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3558 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3559 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3560 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3561 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3562
3563 if (record_debug)
3564 debug_printf ("Process record: data processing SIMD/FP: ");
3565
3566 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3567 {
3568 /* Floating point - fixed point conversion instructions. */
3569 if (!insn_bit21)
3570 {
3571 if (record_debug)
3572 debug_printf ("FP - fixed point conversion");
3573
3574 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3575 record_buf[0] = reg_rd;
3576 else
3577 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3578 }
3579 /* Floating point - conditional compare instructions. */
3580 else if (insn_bits10_11 == 0x01)
3581 {
3582 if (record_debug)
3583 debug_printf ("FP - conditional compare");
3584
3585 record_buf[0] = AARCH64_CPSR_REGNUM;
3586 }
3587 /* Floating point - data processing (2-source) and
3588 conditional select instructions. */
3589 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3590 {
3591 if (record_debug)
3592 debug_printf ("FP - DP (2-source)");
3593
3594 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3595 }
3596 else if (insn_bits10_11 == 0x00)
3597 {
3598 /* Floating point - immediate instructions. */
3599 if ((insn_bits12_15 & 0x01) == 0x01
3600 || (insn_bits12_15 & 0x07) == 0x04)
3601 {
3602 if (record_debug)
3603 debug_printf ("FP - immediate");
3604 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3605 }
3606 /* Floating point - compare instructions. */
3607 else if ((insn_bits12_15 & 0x03) == 0x02)
3608 {
3609 if (record_debug)
3610 	      debug_printf ("FP - compare");
3611 record_buf[0] = AARCH64_CPSR_REGNUM;
3612 }
3613 /* Floating point - integer conversions instructions. */
3614 else if (insn_bits12_15 == 0x00)
3615 {
3616 /* Convert float to integer instruction. */
3617 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3618 {
3619 if (record_debug)
3620 debug_printf ("float to int conversion");
3621
3622 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3623 }
3624 /* Convert integer to float instruction. */
3625 else if ((opcode >> 1) == 0x01 && !rmode)
3626 {
3627 if (record_debug)
3628 debug_printf ("int to float conversion");
3629
3630 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3631 }
3632 /* Move float to integer instruction. */
3633 else if ((opcode >> 1) == 0x03)
3634 {
3635 if (record_debug)
3636 debug_printf ("move float to int");
3637
3638 if (!(opcode & 0x01))
3639 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3640 else
3641 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3642 }
3643 else
3644 return AARCH64_RECORD_UNKNOWN;
3645 }
3646 else
3647 return AARCH64_RECORD_UNKNOWN;
3648 }
3649 else
3650 return AARCH64_RECORD_UNKNOWN;
3651 }
3652 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3653 {
3654 if (record_debug)
3655 debug_printf ("SIMD copy");
3656
3657 /* Advanced SIMD copy instructions. */
3658 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3659 && !bit (aarch64_insn_r->aarch64_insn, 15)
3660 && bit (aarch64_insn_r->aarch64_insn, 10))
3661 {
3662 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3663 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3664 else
3665 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3666 }
3667 else
3668 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3669 }
3670 /* All remaining floating point or advanced SIMD instructions. */
3671 else
3672 {
3673 if (record_debug)
3674 debug_printf ("all remain");
3675
3676 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3677 }
3678
3679 if (record_debug)
3680 debug_printf ("\n");
3681
3682 aarch64_insn_r->reg_rec_count++;
3683 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3684 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3685 record_buf);
3686 return AARCH64_RECORD_SUCCESS;
3687 }
3688
3689 /* Decode the instruction type and invoke its record handler. */
3690
3691 static unsigned int
3692 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3693 {
3694 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3695
3696 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3697 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3698 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3699 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3700
3701 /* Data processing - immediate instructions. */
3702 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3703 return aarch64_record_data_proc_imm (aarch64_insn_r);
3704
3705 /* Branch, exception generation and system instructions. */
3706 if (ins_bit26 && !ins_bit27 && ins_bit28)
3707 return aarch64_record_branch_except_sys (aarch64_insn_r);
3708
3709 /* Load and store instructions. */
3710 if (!ins_bit25 && ins_bit27)
3711 return aarch64_record_load_store (aarch64_insn_r);
3712
3713 /* Data processing - register instructions. */
3714 if (ins_bit25 && !ins_bit26 && ins_bit27)
3715 return aarch64_record_data_proc_reg (aarch64_insn_r);
3716
3717 /* Data processing - SIMD and floating point instructions. */
3718 if (ins_bit25 && ins_bit26 && ins_bit27)
3719 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3720
3721 return AARCH64_RECORD_UNSUPPORTED;
3722 }
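/* A compact view of the dispatch above, keyed on instruction bits
   28..25 (x = don't care), matching the ARMv8 ARM's top-level encoding
   table:

     100x  data processing - immediate
     101x  branch, exception generation and system
     x1x0  loads and stores
     x101  data processing - register
     x111  data processing - SIMD and floating point  */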
3723
3724 /* Cleans up local record registers and memory allocations. */
3725
3726 static void
3727 deallocate_reg_mem (insn_decode_record *record)
3728 {
3729 xfree (record->aarch64_regs);
3730 xfree (record->aarch64_mems);
3731 }
3732
3733 /* Parse the current instruction and record the values of the registers
3734    and memory that the current instruction will change on the
3735    record_arch_list.  Return -1 if something goes wrong. */
3736
3737 int
3738 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3739 CORE_ADDR insn_addr)
3740 {
3741 uint32_t rec_no = 0;
3742 uint8_t insn_size = 4;
3743 uint32_t ret = 0;
3744 gdb_byte buf[insn_size];
3745 insn_decode_record aarch64_record;
3746
3747 memset (&buf[0], 0, insn_size);
3748 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3749 target_read_memory (insn_addr, &buf[0], insn_size);
3750 aarch64_record.aarch64_insn
3751 = (uint32_t) extract_unsigned_integer (&buf[0],
3752 insn_size,
3753 gdbarch_byte_order (gdbarch));
3754 aarch64_record.regcache = regcache;
3755 aarch64_record.this_addr = insn_addr;
3756 aarch64_record.gdbarch = gdbarch;
3757
3758 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3759 if (ret == AARCH64_RECORD_UNSUPPORTED)
3760 {
3761 printf_unfiltered (_("Process record does not support instruction "
3762 			 "0x%08x at address %s.\n"),
3763 aarch64_record.aarch64_insn,
3764 paddress (gdbarch, insn_addr));
3765 ret = -1;
3766 }
3767
3768 if (0 == ret)
3769 {
3770 /* Record registers. */
3771 record_full_arch_list_add_reg (aarch64_record.regcache,
3772 AARCH64_PC_REGNUM);
3773 /* Always record register CPSR. */
3774 record_full_arch_list_add_reg (aarch64_record.regcache,
3775 AARCH64_CPSR_REGNUM);
3776 if (aarch64_record.aarch64_regs)
3777 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3778 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3779 aarch64_record.aarch64_regs[rec_no]))
3780 ret = -1;
3781
3782 /* Record memories. */
3783 if (aarch64_record.aarch64_mems)
3784 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3785 if (record_full_arch_list_add_mem
3786 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3787 aarch64_record.aarch64_mems[rec_no].len))
3788 ret = -1;
3789
3790 if (record_full_arch_list_add_end ())
3791 ret = -1;
3792 }
3793
3794 deallocate_reg_mem (&aarch64_record);
3795 return ret;
3796 }
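/* aarch64_process_record is installed as the gdbarch "process_record"
   hook by the OS ABI code (aarch64-linux-tdep.c in this tree), which
   also supplies the aarch64_syscall_record callback used above.  */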