1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2021 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "arch/aarch64-mte-linux.h"
44 #include "linux-aarch32-tdesc.h"
45 #include "linux-aarch64-tdesc.h"
46 #include "nat/aarch64-mte-linux-ptrace.h"
47 #include "nat/aarch64-sve-linux-ptrace.h"
58 /* Linux target op definitions for the AArch64 architecture. */
60 class aarch64_target
: public linux_process_target
64 const regs_info
*get_regs_info () override
;
66 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
68 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
70 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
72 bool supports_z_point_type (char z_type
) override
;
74 bool supports_tracepoints () override
;
76 bool supports_fast_tracepoints () override
;
78 int install_fast_tracepoint_jump_pad
79 (CORE_ADDR tpoint
, CORE_ADDR tpaddr
, CORE_ADDR collector
,
80 CORE_ADDR lockaddr
, ULONGEST orig_size
, CORE_ADDR
*jump_entry
,
81 CORE_ADDR
*trampoline
, ULONGEST
*trampoline_size
,
82 unsigned char *jjump_pad_insn
, ULONGEST
*jjump_pad_insn_size
,
83 CORE_ADDR
*adjusted_insn_addr
, CORE_ADDR
*adjusted_insn_addr_end
,
86 int get_min_fast_tracepoint_insn_len () override
;
88 struct emit_ops
*emit_ops () override
;
90 bool supports_memory_tagging () override
;
92 bool fetch_memtags (CORE_ADDR address
, size_t len
,
93 gdb::byte_vector
&tags
, int type
) override
;
95 bool store_memtags (CORE_ADDR address
, size_t len
,
96 const gdb::byte_vector
&tags
, int type
) override
;
100 void low_arch_setup () override
;
102 bool low_cannot_fetch_register (int regno
) override
;
104 bool low_cannot_store_register (int regno
) override
;
106 bool low_supports_breakpoints () override
;
108 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
110 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
112 bool low_breakpoint_at (CORE_ADDR pc
) override
;
114 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
115 int size
, raw_breakpoint
*bp
) override
;
117 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
118 int size
, raw_breakpoint
*bp
) override
;
120 bool low_stopped_by_watchpoint () override
;
122 CORE_ADDR
low_stopped_data_address () override
;
124 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
125 int direction
) override
;
127 arch_process_info
*low_new_process () override
;
129 void low_delete_process (arch_process_info
*info
) override
;
131 void low_new_thread (lwp_info
*) override
;
133 void low_delete_thread (arch_lwp_info
*) override
;
135 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
137 void low_prepare_to_resume (lwp_info
*lwp
) override
;
139 int low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
) override
;
141 bool low_supports_range_stepping () override
;
143 bool low_supports_catch_syscall () override
;
145 void low_get_syscall_trapinfo (regcache
*regcache
, int *sysno
) override
;
148 /* The singleton target ops object. */
150 static aarch64_target the_aarch64_target
;
153 aarch64_target::low_cannot_fetch_register (int regno
)
155 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
156 "is not implemented by the target");
160 aarch64_target::low_cannot_store_register (int regno
)
162 gdb_assert_not_reached ("linux target op low_cannot_store_register "
163 "is not implemented by the target");
167 aarch64_target::low_prepare_to_resume (lwp_info
*lwp
)
169 aarch64_linux_prepare_to_resume (lwp
);
172 /* Per-process arch-specific data we want to keep. */
174 struct arch_process_info
176 /* Hardware breakpoint/watchpoint data.
177 The reason for them to be per-process rather than per-thread is
178 due to the lack of information in the gdbserver environment;
179 gdbserver is not told that whether a requested hardware
180 breakpoint/watchpoint is thread specific or not, so it has to set
181 each hw bp/wp for every thread in the current process. The
182 higher level bp/wp management in gdb will resume a thread if a hw
183 bp/wp trap is not expected for it. Since the hw bp/wp setting is
184 same for each thread, it is reasonable for the data to live here.
186 struct aarch64_debug_reg_state debug_reg_state
;
189 /* Return true if the size of register 0 is 8 byte. */
192 is_64bit_tdesc (void)
194 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
196 return register_size (regcache
->tdesc
, 0) == 8;
199 /* Return true if the regcache contains the number of SVE registers. */
204 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
206 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
210 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
212 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
215 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
216 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
217 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
218 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
219 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
223 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
225 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
228 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
229 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
230 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
231 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
232 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
236 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
238 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
241 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
242 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
243 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
244 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
248 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
250 const struct user_fpsimd_state
*regset
251 = (const struct user_fpsimd_state
*) buf
;
254 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
255 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
256 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
257 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
260 /* Store the pauth registers to regcache. */
263 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
265 uint64_t *pauth_regset
= (uint64_t *) buf
;
266 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
271 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
273 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
277 /* Fill BUF with the MTE registers from the regcache. */
280 aarch64_fill_mteregset (struct regcache
*regcache
, void *buf
)
282 uint64_t *mte_regset
= (uint64_t *) buf
;
283 int mte_base
= find_regno (regcache
->tdesc
, "tag_ctl");
285 collect_register (regcache
, mte_base
, mte_regset
);
288 /* Store the MTE registers to regcache. */
291 aarch64_store_mteregset (struct regcache
*regcache
, const void *buf
)
293 uint64_t *mte_regset
= (uint64_t *) buf
;
294 int mte_base
= find_regno (regcache
->tdesc
, "tag_ctl");
296 /* Tag Control register */
297 supply_register (regcache
, mte_base
, mte_regset
);
301 aarch64_target::low_supports_breakpoints ()
306 /* Implementation of linux target ops method "low_get_pc". */
309 aarch64_target::low_get_pc (regcache
*regcache
)
311 if (register_size (regcache
->tdesc
, 0) == 8)
312 return linux_get_pc_64bit (regcache
);
314 return linux_get_pc_32bit (regcache
);
317 /* Implementation of linux target ops method "low_set_pc". */
320 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
322 if (register_size (regcache
->tdesc
, 0) == 8)
323 linux_set_pc_64bit (regcache
, pc
);
325 linux_set_pc_32bit (regcache
, pc
);
328 #define aarch64_breakpoint_len 4
330 /* AArch64 BRK software debug mode instruction.
331 This instruction needs to match gdb/aarch64-tdep.c
332 (aarch64_default_breakpoint). */
333 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
335 /* Implementation of linux target ops method "low_breakpoint_at". */
338 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
340 if (is_64bit_tdesc ())
342 gdb_byte insn
[aarch64_breakpoint_len
];
344 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
345 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
351 return arm_breakpoint_at (where
);
355 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
359 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
361 state
->dr_addr_bp
[i
] = 0;
362 state
->dr_ctrl_bp
[i
] = 0;
363 state
->dr_ref_count_bp
[i
] = 0;
366 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
368 state
->dr_addr_wp
[i
] = 0;
369 state
->dr_ctrl_wp
[i
] = 0;
370 state
->dr_ref_count_wp
[i
] = 0;
374 /* Return the pointer to the debug register state structure in the
375 current process' arch-specific data area. */
377 struct aarch64_debug_reg_state
*
378 aarch64_get_debug_reg_state (pid_t pid
)
380 struct process_info
*proc
= find_process_pid (pid
);
382 return &proc
->priv
->arch_private
->debug_reg_state
;
385 /* Implementation of target ops method "supports_z_point_type". */
388 aarch64_target::supports_z_point_type (char z_type
)
394 case Z_PACKET_WRITE_WP
:
395 case Z_PACKET_READ_WP
:
396 case Z_PACKET_ACCESS_WP
:
403 /* Implementation of linux target ops method "low_insert_point".
405 It actually only records the info of the to-be-inserted bp/wp;
406 the actual insertion will happen when threads are resumed. */
409 aarch64_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
410 int len
, raw_breakpoint
*bp
)
413 enum target_hw_bp_type targ_type
;
414 struct aarch64_debug_reg_state
*state
415 = aarch64_get_debug_reg_state (pid_of (current_thread
));
418 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
419 (unsigned long) addr
, len
);
421 /* Determine the type from the raw breakpoint type. */
422 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
424 if (targ_type
!= hw_execute
)
426 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
427 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
428 1 /* is_insert */, state
);
436 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
437 instruction. Set it to 2 to correctly encode length bit
438 mask in hardware/watchpoint control register. */
441 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
442 1 /* is_insert */, state
);
446 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
452 /* Implementation of linux target ops method "low_remove_point".
454 It actually only records the info of the to-be-removed bp/wp,
455 the actual removal will be done when threads are resumed. */
458 aarch64_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
459 int len
, raw_breakpoint
*bp
)
462 enum target_hw_bp_type targ_type
;
463 struct aarch64_debug_reg_state
*state
464 = aarch64_get_debug_reg_state (pid_of (current_thread
));
467 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
468 (unsigned long) addr
, len
);
470 /* Determine the type from the raw breakpoint type. */
471 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
473 /* Set up state pointers. */
474 if (targ_type
!= hw_execute
)
476 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
482 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
483 instruction. Set it to 2 to correctly encode length bit
484 mask in hardware/watchpoint control register. */
487 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
488 0 /* is_insert */, state
);
492 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
498 /* Return the address only having significant bits. This is used to ignore
499 the top byte (TBI). */
502 address_significant (CORE_ADDR addr
)
504 /* Clear insignificant bits of a target address and sign extend resulting
508 CORE_ADDR sign
= (CORE_ADDR
) 1 << (addr_bit
- 1);
509 addr
&= ((CORE_ADDR
) 1 << addr_bit
) - 1;
510 addr
= (addr
^ sign
) - sign
;
515 /* Implementation of linux target ops method "low_stopped_data_address". */
518 aarch64_target::low_stopped_data_address ()
522 struct aarch64_debug_reg_state
*state
;
524 pid
= lwpid_of (current_thread
);
526 /* Get the siginfo. */
527 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
528 return (CORE_ADDR
) 0;
530 /* Need to be a hardware breakpoint/watchpoint trap. */
531 if (siginfo
.si_signo
!= SIGTRAP
532 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
533 return (CORE_ADDR
) 0;
535 /* Make sure to ignore the top byte, otherwise we may not recognize a
536 hardware watchpoint hit. The stopped data addresses coming from the
537 kernel can potentially be tagged addresses. */
538 const CORE_ADDR addr_trap
539 = address_significant ((CORE_ADDR
) siginfo
.si_addr
);
541 /* Check if the address matches any watched address. */
542 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
543 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
545 const unsigned int offset
546 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
547 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
548 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
549 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
550 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
552 if (state
->dr_ref_count_wp
[i
]
553 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
554 && addr_trap
>= addr_watch_aligned
555 && addr_trap
< addr_watch
+ len
)
557 /* ADDR_TRAP reports the first address of the memory range
558 accessed by the CPU, regardless of what was the memory
559 range watched. Thus, a large CPU access that straddles
560 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
561 ADDR_TRAP that is lower than the
562 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
564 addr: | 4 | 5 | 6 | 7 | 8 |
565 |---- range watched ----|
566 |----------- range accessed ------------|
568 In this case, ADDR_TRAP will be 4.
570 To match a watchpoint known to GDB core, we must never
571 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
572 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
573 positive on kernels older than 4.10. See PR
579 return (CORE_ADDR
) 0;
582 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
585 aarch64_target::low_stopped_by_watchpoint ()
587 return (low_stopped_data_address () != 0);
590 /* Fetch the thread-local storage pointer for libthread_db. */
593 ps_get_thread_area (struct ps_prochandle
*ph
,
594 lwpid_t lwpid
, int idx
, void **base
)
596 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
600 /* Implementation of linux target ops method "low_siginfo_fixup". */
603 aarch64_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
606 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
607 if (!is_64bit_tdesc ())
610 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
613 aarch64_siginfo_from_compat_siginfo (native
,
614 (struct compat_siginfo
*) inf
);
622 /* Implementation of linux target ops method "low_new_process". */
625 aarch64_target::low_new_process ()
627 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
629 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
634 /* Implementation of linux target ops method "low_delete_process". */
637 aarch64_target::low_delete_process (arch_process_info
*info
)
643 aarch64_target::low_new_thread (lwp_info
*lwp
)
645 aarch64_linux_new_thread (lwp
);
649 aarch64_target::low_delete_thread (arch_lwp_info
*arch_lwp
)
651 aarch64_linux_delete_thread (arch_lwp
);
654 /* Implementation of linux target ops method "low_new_fork". */
657 aarch64_target::low_new_fork (process_info
*parent
,
660 /* These are allocated by linux_add_process. */
661 gdb_assert (parent
->priv
!= NULL
662 && parent
->priv
->arch_private
!= NULL
);
663 gdb_assert (child
->priv
!= NULL
664 && child
->priv
->arch_private
!= NULL
);
666 /* Linux kernel before 2.6.33 commit
667 72f674d203cd230426437cdcf7dd6f681dad8b0d
668 will inherit hardware debug registers from parent
669 on fork/vfork/clone. Newer Linux kernels create such tasks with
670 zeroed debug registers.
672 GDB core assumes the child inherits the watchpoints/hw
673 breakpoints of the parent, and will remove them all from the
674 forked off process. Copy the debug registers mirrors into the
675 new process so that all breakpoints and watchpoints can be
676 removed together. The debug registers mirror will become zeroed
677 in the end before detaching the forked off process, thus making
678 this compatible with older Linux kernels too. */
680 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
683 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
684 #define AARCH64_HWCAP_PACA (1 << 30)
686 /* Implementation of linux target ops method "low_arch_setup". */
689 aarch64_target::low_arch_setup ()
691 unsigned int machine
;
695 tid
= lwpid_of (current_thread
);
697 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
701 uint64_t vq
= aarch64_sve_get_vq (tid
);
702 unsigned long hwcap
= linux_get_hwcap (8);
703 unsigned long hwcap2
= linux_get_hwcap2 (8);
704 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
705 /* MTE is AArch64-only. */
706 bool mte_p
= hwcap2
& HWCAP2_MTE
;
708 current_process ()->tdesc
709 = aarch64_linux_read_description (vq
, pauth_p
, mte_p
);
712 current_process ()->tdesc
= aarch32_linux_read_description ();
714 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
717 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
720 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
722 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
725 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
728 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
730 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
733 static struct regset_info aarch64_regsets
[] =
735 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
736 sizeof (struct user_pt_regs
), GENERAL_REGS
,
737 aarch64_fill_gregset
, aarch64_store_gregset
},
738 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
739 sizeof (struct user_fpsimd_state
), FP_REGS
,
740 aarch64_fill_fpregset
, aarch64_store_fpregset
742 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
743 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
744 NULL
, aarch64_store_pauthregset
},
745 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_TAGGED_ADDR_CTRL
,
746 AARCH64_LINUX_SIZEOF_MTE
, OPTIONAL_REGS
, aarch64_fill_mteregset
,
747 aarch64_store_mteregset
},
751 static struct regsets_info aarch64_regsets_info
=
753 aarch64_regsets
, /* regsets */
755 NULL
, /* disabled_regsets */
758 static struct regs_info regs_info_aarch64
=
760 NULL
, /* regset_bitmap */
762 &aarch64_regsets_info
,
765 static struct regset_info aarch64_sve_regsets
[] =
767 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
768 sizeof (struct user_pt_regs
), GENERAL_REGS
,
769 aarch64_fill_gregset
, aarch64_store_gregset
},
770 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
771 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
772 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
774 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
775 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
776 NULL
, aarch64_store_pauthregset
},
777 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_TAGGED_ADDR_CTRL
,
778 AARCH64_LINUX_SIZEOF_MTE
, OPTIONAL_REGS
, aarch64_fill_mteregset
,
779 aarch64_store_mteregset
},
783 static struct regsets_info aarch64_sve_regsets_info
=
785 aarch64_sve_regsets
, /* regsets. */
786 0, /* num_regsets. */
787 NULL
, /* disabled_regsets. */
790 static struct regs_info regs_info_aarch64_sve
=
792 NULL
, /* regset_bitmap. */
794 &aarch64_sve_regsets_info
,
797 /* Implementation of linux target ops method "get_regs_info". */
800 aarch64_target::get_regs_info ()
802 if (!is_64bit_tdesc ())
803 return ®s_info_aarch32
;
806 return ®s_info_aarch64_sve
;
808 return ®s_info_aarch64
;
811 /* Implementation of target ops method "supports_tracepoints". */
814 aarch64_target::supports_tracepoints ()
816 if (current_thread
== NULL
)
820 /* We don't support tracepoints on aarch32 now. */
821 return is_64bit_tdesc ();
825 /* Implementation of linux target ops method "low_get_thread_area". */
828 aarch64_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
833 iovec
.iov_base
= ®
;
834 iovec
.iov_len
= sizeof (reg
);
836 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
845 aarch64_target::low_supports_catch_syscall ()
850 /* Implementation of linux target ops method "low_get_syscall_trapinfo". */
853 aarch64_target::low_get_syscall_trapinfo (regcache
*regcache
, int *sysno
)
855 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
861 collect_register_by_name (regcache
, "x8", &l_sysno
);
862 *sysno
= (int) l_sysno
;
865 collect_register_by_name (regcache
, "r7", sysno
);
868 /* List of condition codes that we need. */
870 enum aarch64_condition_codes
881 enum aarch64_operand_type
887 /* Representation of an operand. At this time, it only supports register
888 and immediate types. */
890 struct aarch64_operand
892 /* Type of the operand. */
893 enum aarch64_operand_type type
;
895 /* Value of the operand according to the type. */
899 struct aarch64_register reg
;
903 /* List of registers that we are currently using, we can add more here as
904 we need to use them. */
906 /* General purpose scratch registers (64 bit). */
907 static const struct aarch64_register x0
= { 0, 1 };
908 static const struct aarch64_register x1
= { 1, 1 };
909 static const struct aarch64_register x2
= { 2, 1 };
910 static const struct aarch64_register x3
= { 3, 1 };
911 static const struct aarch64_register x4
= { 4, 1 };
913 /* General purpose scratch registers (32 bit). */
914 static const struct aarch64_register w0
= { 0, 0 };
915 static const struct aarch64_register w2
= { 2, 0 };
917 /* Intra-procedure scratch registers. */
918 static const struct aarch64_register ip0
= { 16, 1 };
920 /* Special purpose registers. */
921 static const struct aarch64_register fp
= { 29, 1 };
922 static const struct aarch64_register lr
= { 30, 1 };
923 static const struct aarch64_register sp
= { 31, 1 };
924 static const struct aarch64_register xzr
= { 31, 1 };
926 /* Dynamically allocate a new register. If we know the register
927 statically, we should make it a global as above instead of using this
930 static struct aarch64_register
931 aarch64_register (unsigned num
, int is64
)
933 return (struct aarch64_register
) { num
, is64
};
936 /* Helper function to create a register operand, for instructions with
937 different types of operands.
940 p += emit_mov (p, x0, register_operand (x1)); */
942 static struct aarch64_operand
943 register_operand (struct aarch64_register reg
)
945 struct aarch64_operand operand
;
947 operand
.type
= OPERAND_REGISTER
;
953 /* Helper function to create an immediate operand, for instructions with
954 different types of operands.
957 p += emit_mov (p, x0, immediate_operand (12)); */
959 static struct aarch64_operand
960 immediate_operand (uint32_t imm
)
962 struct aarch64_operand operand
;
964 operand
.type
= OPERAND_IMMEDIATE
;
970 /* Helper function to create an offset memory operand.
973 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
975 static struct aarch64_memory_operand
976 offset_memory_operand (int32_t offset
)
978 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
981 /* Helper function to create a pre-index memory operand.
984 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
986 static struct aarch64_memory_operand
987 preindex_memory_operand (int32_t index
)
989 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
992 /* Helper function to create a post-index memory operand.
995 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
997 static struct aarch64_memory_operand
998 postindex_memory_operand (int32_t index
)
1000 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
1021 /* Write a BLR instruction into *BUF.
1025 RN is the register to branch to. */
1028 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
1030 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
1033 /* Write a RET instruction into *BUF.
1037 RN is the register to branch to. */
1040 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
1042 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
1046 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
1047 struct aarch64_register rt
,
1048 struct aarch64_register rt2
,
1049 struct aarch64_register rn
,
1050 struct aarch64_memory_operand operand
)
1054 uint32_t write_back
;
1057 opc
= ENCODE (2, 2, 30);
1059 opc
= ENCODE (0, 2, 30);
1061 switch (operand
.type
)
1063 case MEMORY_OPERAND_OFFSET
:
1065 pre_index
= ENCODE (1, 1, 24);
1066 write_back
= ENCODE (0, 1, 23);
1069 case MEMORY_OPERAND_POSTINDEX
:
1071 pre_index
= ENCODE (0, 1, 24);
1072 write_back
= ENCODE (1, 1, 23);
1075 case MEMORY_OPERAND_PREINDEX
:
1077 pre_index
= ENCODE (1, 1, 24);
1078 write_back
= ENCODE (1, 1, 23);
1085 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
1086 | ENCODE (operand
.index
>> 3, 7, 15)
1087 | ENCODE (rt2
.num
, 5, 10)
1088 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1091 /* Write a STP instruction into *BUF.
1093 STP rt, rt2, [rn, #offset]
1094 STP rt, rt2, [rn, #index]!
1095 STP rt, rt2, [rn], #index
1097 RT and RT2 are the registers to store.
1098 RN is the base address register.
1099 OFFSET is the immediate to add to the base address. It is limited to a
1100 -512 .. 504 range (7 bits << 3). */
1103 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
1104 struct aarch64_register rt2
, struct aarch64_register rn
,
1105 struct aarch64_memory_operand operand
)
1107 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
1110 /* Write a LDP instruction into *BUF.
1112 LDP rt, rt2, [rn, #offset]
1113 LDP rt, rt2, [rn, #index]!
1114 LDP rt, rt2, [rn], #index
1116 RT and RT2 are the registers to store.
1117 RN is the base address register.
1118 OFFSET is the immediate to add to the base address. It is limited to a
1119 -512 .. 504 range (7 bits << 3). */
1122 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
1123 struct aarch64_register rt2
, struct aarch64_register rn
,
1124 struct aarch64_memory_operand operand
)
1126 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
1129 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1131 LDP qt, qt2, [rn, #offset]
1133 RT and RT2 are the Q registers to store.
1134 RN is the base address register.
1135 OFFSET is the immediate to add to the base address. It is limited to
1136 -1024 .. 1008 range (7 bits << 4). */
1139 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1140 struct aarch64_register rn
, int32_t offset
)
1142 uint32_t opc
= ENCODE (2, 2, 30);
1143 uint32_t pre_index
= ENCODE (1, 1, 24);
1145 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1146 | ENCODE (offset
>> 4, 7, 15)
1147 | ENCODE (rt2
, 5, 10)
1148 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1151 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1153 STP qt, qt2, [rn, #offset]
1155 RT and RT2 are the Q registers to store.
1156 RN is the base address register.
1157 OFFSET is the immediate to add to the base address. It is limited to
1158 -1024 .. 1008 range (7 bits << 4). */
1161 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1162 struct aarch64_register rn
, int32_t offset
)
1164 uint32_t opc
= ENCODE (2, 2, 30);
1165 uint32_t pre_index
= ENCODE (1, 1, 24);
1167 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1168 | ENCODE (offset
>> 4, 7, 15)
1169 | ENCODE (rt2
, 5, 10)
1170 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1173 /* Write a LDRH instruction into *BUF.
1175 LDRH wt, [xn, #offset]
1176 LDRH wt, [xn, #index]!
1177 LDRH wt, [xn], #index
1179 RT is the register to store.
1180 RN is the base address register.
1181 OFFSET is the immediate to add to the base address. It is limited to
1182 0 .. 32760 range (12 bits << 3). */
1185 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1186 struct aarch64_register rn
,
1187 struct aarch64_memory_operand operand
)
1189 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1192 /* Write a LDRB instruction into *BUF.
1194 LDRB wt, [xn, #offset]
1195 LDRB wt, [xn, #index]!
1196 LDRB wt, [xn], #index
1198 RT is the register to store.
1199 RN is the base address register.
1200 OFFSET is the immediate to add to the base address. It is limited to
1201 0 .. 32760 range (12 bits << 3). */
1204 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1205 struct aarch64_register rn
,
1206 struct aarch64_memory_operand operand
)
1208 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1213 /* Write a STR instruction into *BUF.
1215 STR rt, [rn, #offset]
1216 STR rt, [rn, #index]!
1217 STR rt, [rn], #index
1219 RT is the register to store.
1220 RN is the base address register.
1221 OFFSET is the immediate to add to the base address. It is limited to
1222 0 .. 32760 range (12 bits << 3). */
1225 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1226 struct aarch64_register rn
,
1227 struct aarch64_memory_operand operand
)
1229 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1232 /* Helper function emitting an exclusive load or store instruction. */
1235 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1236 enum aarch64_opcodes opcode
,
1237 struct aarch64_register rs
,
1238 struct aarch64_register rt
,
1239 struct aarch64_register rt2
,
1240 struct aarch64_register rn
)
1242 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1243 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1244 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1247 /* Write a LAXR instruction into *BUF.
1251 RT is the destination register.
1252 RN is the base address register. */
1255 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1256 struct aarch64_register rn
)
1258 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1262 /* Write a STXR instruction into *BUF.
1266 RS is the result register, it indicates if the store succeeded or not.
1267 RT is the destination register.
1268 RN is the base address register. */
1271 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1272 struct aarch64_register rt
, struct aarch64_register rn
)
1274 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1278 /* Write a STLR instruction into *BUF.
1282 RT is the register to store.
1283 RN is the base address register. */
1286 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1287 struct aarch64_register rn
)
1289 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1293 /* Helper function for data processing instructions with register sources. */
1296 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1297 struct aarch64_register rd
,
1298 struct aarch64_register rn
,
1299 struct aarch64_register rm
)
1301 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1303 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1304 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1307 /* Helper function for data processing instructions taking either a register
1311 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1312 struct aarch64_register rd
,
1313 struct aarch64_register rn
,
1314 struct aarch64_operand operand
)
1316 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1317 /* The opcode is different for register and immediate source operands. */
1318 uint32_t operand_opcode
;
1320 if (operand
.type
== OPERAND_IMMEDIATE
)
1322 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1323 operand_opcode
= ENCODE (8, 4, 25);
1325 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1326 | ENCODE (operand
.imm
, 12, 10)
1327 | ENCODE (rn
.num
, 5, 5)
1328 | ENCODE (rd
.num
, 5, 0));
1332 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1333 operand_opcode
= ENCODE (5, 4, 25);
1335 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1340 /* Write an ADD instruction into *BUF.
1345 This function handles both an immediate and register add.
1347 RD is the destination register.
1348 RN is the input register.
1349 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1350 OPERAND_REGISTER. */
1353 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1354 struct aarch64_register rn
, struct aarch64_operand operand
)
1356 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1359 /* Write a SUB instruction into *BUF.
1364 This function handles both an immediate and register sub.
1366 RD is the destination register.
1367 RN is the input register.
1368 IMM is the immediate to substract to RN. */
1371 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1372 struct aarch64_register rn
, struct aarch64_operand operand
)
1374 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1377 /* Write a MOV instruction into *BUF.
1382 This function handles both a wide immediate move and a register move,
1383 with the condition that the source register is not xzr. xzr and the
1384 stack pointer share the same encoding and this function only supports
1387 RD is the destination register.
1388 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1389 OPERAND_REGISTER. */
1392 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1393 struct aarch64_operand operand
)
1395 if (operand
.type
== OPERAND_IMMEDIATE
)
1397 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1398 /* Do not shift the immediate. */
1399 uint32_t shift
= ENCODE (0, 2, 21);
1401 return aarch64_emit_insn (buf
, MOV
| size
| shift
1402 | ENCODE (operand
.imm
, 16, 5)
1403 | ENCODE (rd
.num
, 5, 0));
1406 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1409 /* Write a MOVK instruction into *BUF.
1411 MOVK rd, #imm, lsl #shift
1413 RD is the destination register.
1414 IMM is the immediate.
1415 SHIFT is the logical shift left to apply to IMM. */
1418 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1421 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1423 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1424 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1427 /* Write instructions into *BUF in order to move ADDR into a register.
1428 ADDR can be a 64-bit value.
1430 This function will emit a series of MOV and MOVK instructions, such as:
1433 MOVK xd, #(addr >> 16), lsl #16
1434 MOVK xd, #(addr >> 32), lsl #32
1435 MOVK xd, #(addr >> 48), lsl #48 */
1438 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1442 /* The MOV (wide immediate) instruction clears to top bits of the
1444 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1446 if ((addr
>> 16) != 0)
1447 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1451 if ((addr
>> 32) != 0)
1452 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1456 if ((addr
>> 48) != 0)
1457 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1462 /* Write a SUBS instruction into *BUF.
1466 This instruction update the condition flags.
1468 RD is the destination register.
1469 RN and RM are the source registers. */
1472 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1473 struct aarch64_register rn
, struct aarch64_operand operand
)
1475 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1478 /* Write a CMP instruction into *BUF.
1482 This instruction is an alias of SUBS xzr, rn, rm.
1484 RN and RM are the registers to compare. */
1487 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1488 struct aarch64_operand operand
)
1490 return emit_subs (buf
, xzr
, rn
, operand
);
1493 /* Write a AND instruction into *BUF.
1497 RD is the destination register.
1498 RN and RM are the source registers. */
1501 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1502 struct aarch64_register rn
, struct aarch64_register rm
)
1504 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1507 /* Write a ORR instruction into *BUF.
1511 RD is the destination register.
1512 RN and RM are the source registers. */
1515 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1516 struct aarch64_register rn
, struct aarch64_register rm
)
1518 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1521 /* Write a ORN instruction into *BUF.
1525 RD is the destination register.
1526 RN and RM are the source registers. */
1529 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1530 struct aarch64_register rn
, struct aarch64_register rm
)
1532 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1535 /* Write a EOR instruction into *BUF.
1539 RD is the destination register.
1540 RN and RM are the source registers. */
1543 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1544 struct aarch64_register rn
, struct aarch64_register rm
)
1546 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1549 /* Write a MVN instruction into *BUF.
1553 This is an alias for ORN rd, xzr, rm.
1555 RD is the destination register.
1556 RM is the source register. */
1559 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1560 struct aarch64_register rm
)
1562 return emit_orn (buf
, rd
, xzr
, rm
);
1565 /* Write a LSLV instruction into *BUF.
1569 RD is the destination register.
1570 RN and RM are the source registers. */
1573 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1574 struct aarch64_register rn
, struct aarch64_register rm
)
1576 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1579 /* Write a LSRV instruction into *BUF.
1583 RD is the destination register.
1584 RN and RM are the source registers. */
1587 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1588 struct aarch64_register rn
, struct aarch64_register rm
)
1590 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1593 /* Write a ASRV instruction into *BUF.
1597 RD is the destination register.
1598 RN and RM are the source registers. */
1601 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1602 struct aarch64_register rn
, struct aarch64_register rm
)
1604 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1607 /* Write a MUL instruction into *BUF.
1611 RD is the destination register.
1612 RN and RM are the source registers. */
1615 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1616 struct aarch64_register rn
, struct aarch64_register rm
)
1618 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1621 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1625 RT is the destination register.
1626 SYSTEM_REG is special purpose register to read. */
1629 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1630 enum aarch64_system_control_registers system_reg
)
1632 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1633 | ENCODE (rt
.num
, 5, 0));
1636 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1640 SYSTEM_REG is special purpose register to write.
1641 RT is the input register. */
1644 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1645 struct aarch64_register rt
)
1647 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1648 | ENCODE (rt
.num
, 5, 0));
1651 /* Write a SEVL instruction into *BUF.
1653 This is a hint instruction telling the hardware to trigger an event. */
1656 emit_sevl (uint32_t *buf
)
1658 return aarch64_emit_insn (buf
, SEVL
);
1661 /* Write a WFE instruction into *BUF.
1663 This is a hint instruction telling the hardware to wait for an event. */
1666 emit_wfe (uint32_t *buf
)
1668 return aarch64_emit_insn (buf
, WFE
);
1671 /* Write a SBFM instruction into *BUF.
1673 SBFM rd, rn, #immr, #imms
1675 This instruction moves the bits from #immr to #imms into the
1676 destination, sign extending the result.
1678 RD is the destination register.
1679 RN is the source register.
1680 IMMR is the bit number to start at (least significant bit).
1681 IMMS is the bit number to stop at (most significant bit). */
1684 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1685 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1687 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1688 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1690 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1691 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1692 | ENCODE (rd
.num
, 5, 0));
1695 /* Write a SBFX instruction into *BUF.
1697 SBFX rd, rn, #lsb, #width
1699 This instruction moves #width bits from #lsb into the destination, sign
1700 extending the result. This is an alias for:
1702 SBFM rd, rn, #lsb, #(lsb + width - 1)
1704 RD is the destination register.
1705 RN is the source register.
1706 LSB is the bit number to start at (least significant bit).
1707 WIDTH is the number of bits to move. */
1710 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1711 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1713 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1716 /* Write a UBFM instruction into *BUF.
1718 UBFM rd, rn, #immr, #imms
1720 This instruction moves the bits from #immr to #imms into the
1721 destination, extending the result with zeros.
1723 RD is the destination register.
1724 RN is the source register.
1725 IMMR is the bit number to start at (least significant bit).
1726 IMMS is the bit number to stop at (most significant bit). */
1729 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1730 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1732 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1733 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1735 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1736 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1737 | ENCODE (rd
.num
, 5, 0));
1740 /* Write a UBFX instruction into *BUF.
1742 UBFX rd, rn, #lsb, #width
1744 This instruction moves #width bits from #lsb into the destination,
1745 extending the result with zeros. This is an alias for:
1747 UBFM rd, rn, #lsb, #(lsb + width - 1)
1749 RD is the destination register.
1750 RN is the source register.
1751 LSB is the bit number to start at (least significant bit).
1752 WIDTH is the number of bits to move. */
1755 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1756 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1758 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1761 /* Write a CSINC instruction into *BUF.
1763 CSINC rd, rn, rm, cond
1765 This instruction conditionally increments rn or rm and places the result
1766 in rd. rn is chosen is the condition is true.
1768 RD is the destination register.
1769 RN and RM are the source registers.
1770 COND is the encoded condition. */
1773 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1774 struct aarch64_register rn
, struct aarch64_register rm
,
1777 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1779 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1780 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1781 | ENCODE (rd
.num
, 5, 0));
1784 /* Write a CSET instruction into *BUF.
1788 This instruction conditionally write 1 or 0 in the destination register.
1789 1 is written if the condition is true. This is an alias for:
1791 CSINC rd, xzr, xzr, !cond
1793 Note that the condition needs to be inverted.
1795 RD is the destination register.
1796 RN and RM are the source registers.
1797 COND is the encoded condition. */
1800 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1802 /* The least significant bit of the condition needs toggling in order to
1804 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
/* Write LEN instructions from BUF into the inferior memory at *TO, and
   advance *TO past the bytes written.

   Note instructions are always little endian on AArch64, unlike data, so
   on a big-endian host each word is byte-swapped before being written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  /* Host is big-endian: convert each instruction word to little endian
     into a temporary buffer before writing it out.  */
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  /* Little-endian host: the buffer is already in target byte order.  */
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".

   Re-emit an unconditional branch (B or BL) with its offset adjusted for
   the instruction's new location.  If the adjusted offset does not fit in
   the 28-bit branch range, nothing is emitted; the caller detects this
   because INSN_PTR is left unchanged.  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Branch target relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".

   Re-emit a conditional branch with its offset adjusted for the new
   location, falling back to a B.COND/B/B trampoline when the adjusted
   offset exceeds the conditional branch range.  Emits nothing if even
   the unconditional range (28 bits) is exceeded.  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Branch target relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".

   Re-emit a compare-and-branch (CBZ/CBNZ) with its offset adjusted for
   the new location, falling back to a CB/B/B trampoline when the
   adjusted offset exceeds the compare-and-branch range.  Emits nothing
   if even the unconditional range (28 bits) is exceeded.  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Branch target relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".

   Re-emit a test-bit-and-branch (TBZ/TBNZ) with its offset adjusted for
   the new location, falling back to a TB/B/B trampoline when the
   adjusted offset exceeds the 16-bit test-and-branch range.  Emits
   nothing if even the unconditional range (28 bits) is exceeded.  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Branch target relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP compute a PC-relative address; since we know both the original
   PC and the offset, we materialize the absolute result directly with a
   MOV/MOVK sequence instead of re-encoding a PC-relative form.  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load is rewritten as: materialize the absolute
   literal address into RT with MOV/MOVK, then load through RT.  IS_SW
   selects LDRSW (sign-extending 32-bit load) over LDR.  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Absolute address of the literal being loaded.  */
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
/* Visitor used by aarch64_relocate_instruction to relocate a single
   instruction into the fast tracepoint jump pad.  The member order must
   match struct aarch64_insn_visitor.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
2061 aarch64_target::supports_fast_tracepoints ()
/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consist of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
2445 /* Helper function writing LEN instructions from START into
2446 current_insn_ptr. */
2449 emit_ops_insns (const uint32_t *start
, int len
)
2451 CORE_ADDR buildaddr
= current_insn_ptr
;
2454 debug_printf ("Adding %d instrucions at %s\n",
2455 len
, paddress (buildaddr
));
2457 append_insns (&buildaddr
, len
, start
);
2458 current_insn_ptr
= buildaddr
;
2461 /* Pop a register from the stack. */
2464 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2466 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2469 /* Push a register on the stack. */
2472 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2474 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emit a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able refer to value and regs later, we save
     the current stack pointer in the frame pointer.  This way, it is not
     clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  /* Save the incoming arguments and the link/frame registers in the
     four-slot frame pictured above.  */
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  /* Point FP at the saved FP/LR pair so the frame can be found later.  */
  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
2523 /* Implementation of emit_ops method "emit_epilogue". */
2526 aarch64_emit_epilogue (void)
2531 /* Store the result of the expression (x0) in *value. */
2532 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2533 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2534 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2536 /* Restore the previous state. */
2537 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2538 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2540 /* Return expr_eval_no_error. */
2541 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2542 p
+= emit_ret (p
, lr
);
2544 emit_ops_insns (buf
, p
- buf
);
2547 /* Implementation of emit_ops method "emit_add". */
2550 aarch64_emit_add (void)
2555 p
+= emit_pop (p
, x1
);
2556 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2558 emit_ops_insns (buf
, p
- buf
);
2561 /* Implementation of emit_ops method "emit_sub". */
2564 aarch64_emit_sub (void)
2569 p
+= emit_pop (p
, x1
);
2570 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2572 emit_ops_insns (buf
, p
- buf
);
2575 /* Implementation of emit_ops method "emit_mul". */
2578 aarch64_emit_mul (void)
2583 p
+= emit_pop (p
, x1
);
2584 p
+= emit_mul (p
, x0
, x1
, x0
);
2586 emit_ops_insns (buf
, p
- buf
);
2589 /* Implementation of emit_ops method "emit_lsh". */
2592 aarch64_emit_lsh (void)
2597 p
+= emit_pop (p
, x1
);
2598 p
+= emit_lslv (p
, x0
, x1
, x0
);
2600 emit_ops_insns (buf
, p
- buf
);
2603 /* Implementation of emit_ops method "emit_rsh_signed". */
2606 aarch64_emit_rsh_signed (void)
2611 p
+= emit_pop (p
, x1
);
2612 p
+= emit_asrv (p
, x0
, x1
, x0
);
2614 emit_ops_insns (buf
, p
- buf
);
2617 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2620 aarch64_emit_rsh_unsigned (void)
2625 p
+= emit_pop (p
, x1
);
2626 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2628 emit_ops_insns (buf
, p
- buf
);
2631 /* Implementation of emit_ops method "emit_ext". */
2634 aarch64_emit_ext (int arg
)
2639 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2641 emit_ops_insns (buf
, p
- buf
);
2644 /* Implementation of emit_ops method "emit_log_not". */
2647 aarch64_emit_log_not (void)
2652 /* If the top of the stack is 0, replace it with 1. Else replace it with
2655 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2656 p
+= emit_cset (p
, x0
, EQ
);
2658 emit_ops_insns (buf
, p
- buf
);
2661 /* Implementation of emit_ops method "emit_bit_and". */
2664 aarch64_emit_bit_and (void)
2669 p
+= emit_pop (p
, x1
);
2670 p
+= emit_and (p
, x0
, x0
, x1
);
2672 emit_ops_insns (buf
, p
- buf
);
2675 /* Implementation of emit_ops method "emit_bit_or". */
2678 aarch64_emit_bit_or (void)
2683 p
+= emit_pop (p
, x1
);
2684 p
+= emit_orr (p
, x0
, x0
, x1
);
2686 emit_ops_insns (buf
, p
- buf
);
2689 /* Implementation of emit_ops method "emit_bit_xor". */
2692 aarch64_emit_bit_xor (void)
2697 p
+= emit_pop (p
, x1
);
2698 p
+= emit_eor (p
, x0
, x0
, x1
);
2700 emit_ops_insns (buf
, p
- buf
);
2703 /* Implementation of emit_ops method "emit_bit_not". */
2706 aarch64_emit_bit_not (void)
2711 p
+= emit_mvn (p
, x0
, x0
);
2713 emit_ops_insns (buf
, p
- buf
);
2716 /* Implementation of emit_ops method "emit_equal". */
2719 aarch64_emit_equal (void)
2724 p
+= emit_pop (p
, x1
);
2725 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2726 p
+= emit_cset (p
, x0
, EQ
);
2728 emit_ops_insns (buf
, p
- buf
);
2731 /* Implementation of emit_ops method "emit_less_signed". */
2734 aarch64_emit_less_signed (void)
2739 p
+= emit_pop (p
, x1
);
2740 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2741 p
+= emit_cset (p
, x0
, LT
);
2743 emit_ops_insns (buf
, p
- buf
);
2746 /* Implementation of emit_ops method "emit_less_unsigned". */
2749 aarch64_emit_less_unsigned (void)
2754 p
+= emit_pop (p
, x1
);
2755 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2756 p
+= emit_cset (p
, x0
, LO
);
2758 emit_ops_insns (buf
, p
- buf
);
2761 /* Implementation of emit_ops method "emit_ref". */
2764 aarch64_emit_ref (int size
)
2772 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2775 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2778 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2781 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2784 /* Unknown size, bail on compilation. */
2789 emit_ops_insns (buf
, p
- buf
);
2792 /* Implementation of emit_ops method "emit_if_goto". */
2795 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2800 /* The Z flag is set or cleared here. */
2801 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2802 /* This instruction must not change the Z flag. */
2803 p
+= emit_pop (p
, x0
);
2804 /* Branch over the next instruction if x0 == 0. */
2805 p
+= emit_bcond (p
, EQ
, 8);
2807 /* The NOP instruction will be patched with an unconditional branch. */
2809 *offset_p
= (p
- buf
) * 4;
2814 emit_ops_insns (buf
, p
- buf
);
2817 /* Implementation of emit_ops method "emit_goto". */
2820 aarch64_emit_goto (int *offset_p
, int *size_p
)
2825 /* The NOP instruction will be patched with an unconditional branch. */
2832 emit_ops_insns (buf
, p
- buf
);
2835 /* Implementation of emit_ops method "write_goto_address". */
2838 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2842 emit_b (&insn
, 0, to
- from
);
2843 append_insns (&from
, 1, &insn
);
2846 /* Implementation of emit_ops method "emit_const". */
2849 aarch64_emit_const (LONGEST num
)
2854 p
+= emit_mov_addr (p
, x0
, num
);
2856 emit_ops_insns (buf
, p
- buf
);
2859 /* Implementation of emit_ops method "emit_call". */
2862 aarch64_emit_call (CORE_ADDR fn
)
2867 p
+= emit_mov_addr (p
, ip0
, fn
);
2868 p
+= emit_blr (p
, ip0
);
2870 emit_ops_insns (buf
, p
- buf
);
2873 /* Implementation of emit_ops method "emit_reg". */
2876 aarch64_emit_reg (int reg
)
2881 /* Set x0 to unsigned char *regs. */
2882 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2883 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2884 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2886 emit_ops_insns (buf
, p
- buf
);
2888 aarch64_emit_call (get_raw_reg_func_addr ());
2891 /* Implementation of emit_ops method "emit_pop". */
2894 aarch64_emit_pop (void)
2899 p
+= emit_pop (p
, x0
);
2901 emit_ops_insns (buf
, p
- buf
);
2904 /* Implementation of emit_ops method "emit_stack_flush". */
2907 aarch64_emit_stack_flush (void)
2912 p
+= emit_push (p
, x0
);
2914 emit_ops_insns (buf
, p
- buf
);
2917 /* Implementation of emit_ops method "emit_zero_ext". */
2920 aarch64_emit_zero_ext (int arg
)
2925 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2927 emit_ops_insns (buf
, p
- buf
);
2930 /* Implementation of emit_ops method "emit_swap". */
2933 aarch64_emit_swap (void)
2938 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2939 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2940 p
+= emit_mov (p
, x0
, register_operand (x1
));
2942 emit_ops_insns (buf
, p
- buf
);
2945 /* Implementation of emit_ops method "emit_stack_adjust". */
2948 aarch64_emit_stack_adjust (int n
)
2950 /* This is not needed with our design. */
2954 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2956 emit_ops_insns (buf
, p
- buf
);
2959 /* Implementation of emit_ops method "emit_int_call_1". */
2962 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2967 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2969 emit_ops_insns (buf
, p
- buf
);
2971 aarch64_emit_call (fn
);
2974 /* Implementation of emit_ops method "emit_void_call_2". */
2977 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2982 /* Push x0 on the stack. */
2983 aarch64_emit_stack_flush ();
2985 /* Setup arguments for the function call:
2988 x1: top of the stack
2993 p
+= emit_mov (p
, x1
, register_operand (x0
));
2994 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2996 emit_ops_insns (buf
, p
- buf
);
2998 aarch64_emit_call (fn
);
3001 aarch64_emit_pop ();
3004 /* Implementation of emit_ops method "emit_eq_goto". */
3007 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
3012 p
+= emit_pop (p
, x1
);
3013 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3014 /* Branch over the next instruction if x0 != x1. */
3015 p
+= emit_bcond (p
, NE
, 8);
3016 /* The NOP instruction will be patched with an unconditional branch. */
3018 *offset_p
= (p
- buf
) * 4;
3023 emit_ops_insns (buf
, p
- buf
);
3026 /* Implementation of emit_ops method "emit_ne_goto". */
3029 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
3034 p
+= emit_pop (p
, x1
);
3035 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3036 /* Branch over the next instruction if x0 == x1. */
3037 p
+= emit_bcond (p
, EQ
, 8);
3038 /* The NOP instruction will be patched with an unconditional branch. */
3040 *offset_p
= (p
- buf
) * 4;
3045 emit_ops_insns (buf
, p
- buf
);
3048 /* Implementation of emit_ops method "emit_lt_goto". */
3051 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
3056 p
+= emit_pop (p
, x1
);
3057 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3058 /* Branch over the next instruction if x0 >= x1. */
3059 p
+= emit_bcond (p
, GE
, 8);
3060 /* The NOP instruction will be patched with an unconditional branch. */
3062 *offset_p
= (p
- buf
) * 4;
3067 emit_ops_insns (buf
, p
- buf
);
3070 /* Implementation of emit_ops method "emit_le_goto". */
3073 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
3078 p
+= emit_pop (p
, x1
);
3079 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3080 /* Branch over the next instruction if x0 > x1. */
3081 p
+= emit_bcond (p
, GT
, 8);
3082 /* The NOP instruction will be patched with an unconditional branch. */
3084 *offset_p
= (p
- buf
) * 4;
3089 emit_ops_insns (buf
, p
- buf
);
3092 /* Implementation of emit_ops method "emit_gt_goto". */
3095 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
3100 p
+= emit_pop (p
, x1
);
3101 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3102 /* Branch over the next instruction if x0 <= x1. */
3103 p
+= emit_bcond (p
, LE
, 8);
3104 /* The NOP instruction will be patched with an unconditional branch. */
3106 *offset_p
= (p
- buf
) * 4;
3111 emit_ops_insns (buf
, p
- buf
);
3114 /* Implementation of emit_ops method "emit_ge_got". */
3117 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
3122 p
+= emit_pop (p
, x1
);
3123 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3124 /* Branch over the next instruction if x0 <= x1. */
3125 p
+= emit_bcond (p
, LT
, 8);
3126 /* The NOP instruction will be patched with an unconditional branch. */
3128 *offset_p
= (p
- buf
) * 4;
3133 emit_ops_insns (buf
, p
- buf
);
/* AArch64 agent-expression bytecode compiler callbacks.  Entries are
   positional and must stay in the order the fields are declared in
   struct emit_ops.  */

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,	/* emit_ge_goto (field name uses this spelling).  */
};
/* Implementation of target ops method "emit_ops".  Returns the
   AArch64 agent-expression compiler callback table above.  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  AArch64 instructions are a
   fixed 4 bytes, so a single instruction slot suffices.  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}
/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}
3202 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3205 aarch64_target::sw_breakpoint_from_kind (int kind
, int *size
)
3207 if (is_64bit_tdesc ())
3209 *size
= aarch64_breakpoint_len
;
3210 return aarch64_breakpoint
;
3213 return arm_sw_breakpoint_from_kind (kind
, size
);
3216 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3219 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3221 if (is_64bit_tdesc ())
3222 return aarch64_breakpoint_len
;
3224 return arm_breakpoint_kind_from_pc (pcptr
);
3227 /* Implementation of the target ops method
3228 "breakpoint_kind_from_current_state". */
3231 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3233 if (is_64bit_tdesc ())
3234 return aarch64_breakpoint_len
;
3236 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Returns true if memory tagging (MTE) is supported.  */

bool
aarch64_target::supports_memory_tagging ()
{
  if (current_thread == NULL)
    {
      /* We don't have any processes running, so don't attempt to
	 use linux_get_hwcap2 as it will try to fetch the current
	 thread id.  Instead, just fetch the auxv from the self
	 PID.  */
#ifdef HAVE_GETAUXVAL
      return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
#else
      /* NOTE(review): without getauxval we cannot query HWCAP2 here;
	 confirm the intended fallback value against upstream.  */
      return true;
#endif
    }

  /* Query HWCAP2 of the current (running) thread; 8 is the size in
     bytes of the auxv entry value.  */
  return (linux_get_hwcap2 (8) & HWCAP2_MTE) != 0;
}
3260 aarch64_target::fetch_memtags (CORE_ADDR address
, size_t len
,
3261 gdb::byte_vector
&tags
, int type
)
3263 /* Allocation tags are per-process, so any tid is fine. */
3264 int tid
= lwpid_of (current_thread
);
3266 /* Allocation tag? */
3267 if (type
== static_cast <int> (aarch64_memtag_type::mte_allocation
))
3268 return aarch64_mte_fetch_memtags (tid
, address
, len
, tags
);
3274 aarch64_target::store_memtags (CORE_ADDR address
, size_t len
,
3275 const gdb::byte_vector
&tags
, int type
)
3277 /* Allocation tags are per-process, so any tid is fine. */
3278 int tid
= lwpid_of (current_thread
);
3280 /* Allocation tag? */
3281 if (type
== static_cast <int> (aarch64_memtag_type::mte_allocation
))
3282 return aarch64_mte_store_memtags (tid
, address
, len
, tags
);
/* The linux target ops object.  Points at the AArch64 instance so the
   generic gdbserver code dispatches to this file.  */

linux_process_target *the_linux_target = &the_aarch64_target;
3292 initialize_low_arch (void)
3294 initialize_low_arch_aarch32 ();
3296 initialize_regsets_info (&aarch64_regsets_info
);
3297 initialize_regsets_info (&aarch64_sve_regsets_info
);