1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
60 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
62 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
64 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
66 bool supports_z_point_type (char z_type
) override
;
68 bool supports_tracepoints () override
;
70 bool supports_fast_tracepoints () override
;
72 int install_fast_tracepoint_jump_pad
73 (CORE_ADDR tpoint
, CORE_ADDR tpaddr
, CORE_ADDR collector
,
74 CORE_ADDR lockaddr
, ULONGEST orig_size
, CORE_ADDR
*jump_entry
,
75 CORE_ADDR
*trampoline
, ULONGEST
*trampoline_size
,
76 unsigned char *jjump_pad_insn
, ULONGEST
*jjump_pad_insn_size
,
77 CORE_ADDR
*adjusted_insn_addr
, CORE_ADDR
*adjusted_insn_addr_end
,
80 int get_min_fast_tracepoint_insn_len () override
;
82 struct emit_ops
*emit_ops () override
;
86 void low_arch_setup () override
;
88 bool low_cannot_fetch_register (int regno
) override
;
90 bool low_cannot_store_register (int regno
) override
;
92 bool low_supports_breakpoints () override
;
94 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
96 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
98 bool low_breakpoint_at (CORE_ADDR pc
) override
;
100 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
101 int size
, raw_breakpoint
*bp
) override
;
103 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
104 int size
, raw_breakpoint
*bp
) override
;
106 bool low_stopped_by_watchpoint () override
;
108 CORE_ADDR
low_stopped_data_address () override
;
110 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
111 int direction
) override
;
113 arch_process_info
*low_new_process () override
;
115 void low_delete_process (arch_process_info
*info
) override
;
117 void low_new_thread (lwp_info
*) override
;
119 void low_delete_thread (arch_lwp_info
*) override
;
121 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
123 void low_prepare_to_resume (lwp_info
*lwp
) override
;
125 int low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
) override
;
127 bool low_supports_range_stepping () override
;
130 /* The singleton target ops object. */
132 static aarch64_target the_aarch64_target
;
135 aarch64_target::low_cannot_fetch_register (int regno
)
137 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
138 "is not implemented by the target");
142 aarch64_target::low_cannot_store_register (int regno
)
144 gdb_assert_not_reached ("linux target op low_cannot_store_register "
145 "is not implemented by the target");
149 aarch64_target::low_prepare_to_resume (lwp_info
*lwp
)
151 aarch64_linux_prepare_to_resume (lwp
);
154 /* Per-process arch-specific data we want to keep. */
156 struct arch_process_info
158 /* Hardware breakpoint/watchpoint data.
159 The reason for them to be per-process rather than per-thread is
160 due to the lack of information in the gdbserver environment;
161 gdbserver is not told that whether a requested hardware
162 breakpoint/watchpoint is thread specific or not, so it has to set
163 each hw bp/wp for every thread in the current process. The
164 higher level bp/wp management in gdb will resume a thread if a hw
165 bp/wp trap is not expected for it. Since the hw bp/wp setting is
166 same for each thread, it is reasonable for the data to live here.
168 struct aarch64_debug_reg_state debug_reg_state
;
171 /* Return true if the size of register 0 is 8 byte. */
174 is_64bit_tdesc (void)
176 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
178 return register_size (regcache
->tdesc
, 0) == 8;
181 /* Return true if the regcache contains the number of SVE registers. */
186 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
188 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
192 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
194 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
197 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
198 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
199 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
200 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
201 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
205 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
207 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
210 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
211 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
212 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
213 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
214 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
218 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
220 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
223 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
224 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
225 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
226 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
230 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
232 const struct user_fpsimd_state
*regset
233 = (const struct user_fpsimd_state
*) buf
;
236 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
237 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
238 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
239 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
242 /* Store the pauth registers to regcache. */
245 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
247 uint64_t *pauth_regset
= (uint64_t *) buf
;
248 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
253 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
255 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
260 aarch64_target::low_supports_breakpoints ()
265 /* Implementation of linux target ops method "low_get_pc". */
268 aarch64_target::low_get_pc (regcache
*regcache
)
270 if (register_size (regcache
->tdesc
, 0) == 8)
271 return linux_get_pc_64bit (regcache
);
273 return linux_get_pc_32bit (regcache
);
276 /* Implementation of linux target ops method "low_set_pc". */
279 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
281 if (register_size (regcache
->tdesc
, 0) == 8)
282 linux_set_pc_64bit (regcache
, pc
);
284 linux_set_pc_32bit (regcache
, pc
);
287 #define aarch64_breakpoint_len 4
289 /* AArch64 BRK software debug mode instruction.
290 This instruction needs to match gdb/aarch64-tdep.c
291 (aarch64_default_breakpoint). */
292 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
294 /* Implementation of linux target ops method "low_breakpoint_at". */
297 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
299 if (is_64bit_tdesc ())
301 gdb_byte insn
[aarch64_breakpoint_len
];
303 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
304 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
310 return arm_breakpoint_at (where
);
314 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
318 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
320 state
->dr_addr_bp
[i
] = 0;
321 state
->dr_ctrl_bp
[i
] = 0;
322 state
->dr_ref_count_bp
[i
] = 0;
325 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
327 state
->dr_addr_wp
[i
] = 0;
328 state
->dr_ctrl_wp
[i
] = 0;
329 state
->dr_ref_count_wp
[i
] = 0;
333 /* Return the pointer to the debug register state structure in the
334 current process' arch-specific data area. */
336 struct aarch64_debug_reg_state
*
337 aarch64_get_debug_reg_state (pid_t pid
)
339 struct process_info
*proc
= find_process_pid (pid
);
341 return &proc
->priv
->arch_private
->debug_reg_state
;
344 /* Implementation of target ops method "supports_z_point_type". */
347 aarch64_target::supports_z_point_type (char z_type
)
353 case Z_PACKET_WRITE_WP
:
354 case Z_PACKET_READ_WP
:
355 case Z_PACKET_ACCESS_WP
:
362 /* Implementation of linux target ops method "low_insert_point".
364 It actually only records the info of the to-be-inserted bp/wp;
365 the actual insertion will happen when threads are resumed. */
368 aarch64_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
369 int len
, raw_breakpoint
*bp
)
372 enum target_hw_bp_type targ_type
;
373 struct aarch64_debug_reg_state
*state
374 = aarch64_get_debug_reg_state (pid_of (current_thread
));
377 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
378 (unsigned long) addr
, len
);
380 /* Determine the type from the raw breakpoint type. */
381 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
383 if (targ_type
!= hw_execute
)
385 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
386 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
387 1 /* is_insert */, state
);
395 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
396 instruction. Set it to 2 to correctly encode length bit
397 mask in hardware/watchpoint control register. */
400 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
401 1 /* is_insert */, state
);
405 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
411 /* Implementation of linux target ops method "low_remove_point".
413 It actually only records the info of the to-be-removed bp/wp,
414 the actual removal will be done when threads are resumed. */
417 aarch64_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
418 int len
, raw_breakpoint
*bp
)
421 enum target_hw_bp_type targ_type
;
422 struct aarch64_debug_reg_state
*state
423 = aarch64_get_debug_reg_state (pid_of (current_thread
));
426 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
427 (unsigned long) addr
, len
);
429 /* Determine the type from the raw breakpoint type. */
430 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
432 /* Set up state pointers. */
433 if (targ_type
!= hw_execute
)
435 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
441 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
442 instruction. Set it to 2 to correctly encode length bit
443 mask in hardware/watchpoint control register. */
446 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
447 0 /* is_insert */, state
);
451 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
457 /* Implementation of linux target ops method "low_stopped_data_address". */
460 aarch64_target::low_stopped_data_address ()
464 struct aarch64_debug_reg_state
*state
;
466 pid
= lwpid_of (current_thread
);
468 /* Get the siginfo. */
469 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
470 return (CORE_ADDR
) 0;
472 /* Need to be a hardware breakpoint/watchpoint trap. */
473 if (siginfo
.si_signo
!= SIGTRAP
474 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
475 return (CORE_ADDR
) 0;
477 /* Check if the address matches any watched address. */
478 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
479 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
481 const unsigned int offset
482 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
483 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
484 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
485 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
486 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
487 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
489 if (state
->dr_ref_count_wp
[i
]
490 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
491 && addr_trap
>= addr_watch_aligned
492 && addr_trap
< addr_watch
+ len
)
494 /* ADDR_TRAP reports the first address of the memory range
495 accessed by the CPU, regardless of what was the memory
496 range watched. Thus, a large CPU access that straddles
497 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
498 ADDR_TRAP that is lower than the
499 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
501 addr: | 4 | 5 | 6 | 7 | 8 |
502 |---- range watched ----|
503 |----------- range accessed ------------|
505 In this case, ADDR_TRAP will be 4.
507 To match a watchpoint known to GDB core, we must never
508 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
509 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
510 positive on kernels older than 4.10. See PR
516 return (CORE_ADDR
) 0;
519 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
522 aarch64_target::low_stopped_by_watchpoint ()
524 return (low_stopped_data_address () != 0);
527 /* Fetch the thread-local storage pointer for libthread_db. */
530 ps_get_thread_area (struct ps_prochandle
*ph
,
531 lwpid_t lwpid
, int idx
, void **base
)
533 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
537 /* Implementation of linux target ops method "low_siginfo_fixup". */
540 aarch64_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
543 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
544 if (!is_64bit_tdesc ())
547 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
550 aarch64_siginfo_from_compat_siginfo (native
,
551 (struct compat_siginfo
*) inf
);
559 /* Implementation of linux target ops method "low_new_process". */
562 aarch64_target::low_new_process ()
564 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
566 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
571 /* Implementation of linux target ops method "low_delete_process". */
574 aarch64_target::low_delete_process (arch_process_info
*info
)
580 aarch64_target::low_new_thread (lwp_info
*lwp
)
582 aarch64_linux_new_thread (lwp
);
586 aarch64_target::low_delete_thread (arch_lwp_info
*arch_lwp
)
588 aarch64_linux_delete_thread (arch_lwp
);
591 /* Implementation of linux target ops method "low_new_fork". */
594 aarch64_target::low_new_fork (process_info
*parent
,
597 /* These are allocated by linux_add_process. */
598 gdb_assert (parent
->priv
!= NULL
599 && parent
->priv
->arch_private
!= NULL
);
600 gdb_assert (child
->priv
!= NULL
601 && child
->priv
->arch_private
!= NULL
);
603 /* Linux kernel before 2.6.33 commit
604 72f674d203cd230426437cdcf7dd6f681dad8b0d
605 will inherit hardware debug registers from parent
606 on fork/vfork/clone. Newer Linux kernels create such tasks with
607 zeroed debug registers.
609 GDB core assumes the child inherits the watchpoints/hw
610 breakpoints of the parent, and will remove them all from the
611 forked off process. Copy the debug registers mirrors into the
612 new process so that all breakpoints and watchpoints can be
613 removed together. The debug registers mirror will become zeroed
614 in the end before detaching the forked off process, thus making
615 this compatible with older Linux kernels too. */
617 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
620 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
621 #define AARCH64_HWCAP_PACA (1 << 30)
623 /* Implementation of linux target ops method "low_arch_setup". */
626 aarch64_target::low_arch_setup ()
628 unsigned int machine
;
632 tid
= lwpid_of (current_thread
);
634 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
638 uint64_t vq
= aarch64_sve_get_vq (tid
);
639 unsigned long hwcap
= linux_get_hwcap (8);
640 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
642 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
645 current_process ()->tdesc
= aarch32_linux_read_description ();
647 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
650 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
653 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
655 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
658 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
661 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
663 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
666 static struct regset_info aarch64_regsets
[] =
668 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
669 sizeof (struct user_pt_regs
), GENERAL_REGS
,
670 aarch64_fill_gregset
, aarch64_store_gregset
},
671 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
672 sizeof (struct user_fpsimd_state
), FP_REGS
,
673 aarch64_fill_fpregset
, aarch64_store_fpregset
675 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
676 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
677 NULL
, aarch64_store_pauthregset
},
681 static struct regsets_info aarch64_regsets_info
=
683 aarch64_regsets
, /* regsets */
685 NULL
, /* disabled_regsets */
688 static struct regs_info regs_info_aarch64
=
690 NULL
, /* regset_bitmap */
692 &aarch64_regsets_info
,
695 static struct regset_info aarch64_sve_regsets
[] =
697 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
698 sizeof (struct user_pt_regs
), GENERAL_REGS
,
699 aarch64_fill_gregset
, aarch64_store_gregset
},
700 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
701 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
702 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
704 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
705 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
706 NULL
, aarch64_store_pauthregset
},
710 static struct regsets_info aarch64_sve_regsets_info
=
712 aarch64_sve_regsets
, /* regsets. */
713 0, /* num_regsets. */
714 NULL
, /* disabled_regsets. */
717 static struct regs_info regs_info_aarch64_sve
=
719 NULL
, /* regset_bitmap. */
721 &aarch64_sve_regsets_info
,
724 /* Implementation of linux target ops method "get_regs_info". */
727 aarch64_target::get_regs_info ()
729 if (!is_64bit_tdesc ())
730 return ®s_info_aarch32
;
733 return ®s_info_aarch64_sve
;
735 return ®s_info_aarch64
;
738 /* Implementation of target ops method "supports_tracepoints". */
741 aarch64_target::supports_tracepoints ()
743 if (current_thread
== NULL
)
747 /* We don't support tracepoints on aarch32 now. */
748 return is_64bit_tdesc ();
752 /* Implementation of linux target ops method "low_get_thread_area". */
755 aarch64_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
760 iovec
.iov_base
= ®
;
761 iovec
.iov_len
= sizeof (reg
);
763 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
771 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
774 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
776 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
782 collect_register_by_name (regcache
, "x8", &l_sysno
);
783 *sysno
= (int) l_sysno
;
786 collect_register_by_name (regcache
, "r7", sysno
);
789 /* List of condition codes that we need. */
791 enum aarch64_condition_codes
802 enum aarch64_operand_type
808 /* Representation of an operand. At this time, it only supports register
809 and immediate types. */
811 struct aarch64_operand
813 /* Type of the operand. */
814 enum aarch64_operand_type type
;
816 /* Value of the operand according to the type. */
820 struct aarch64_register reg
;
824 /* List of registers that we are currently using, we can add more here as
825 we need to use them. */
827 /* General purpose scratch registers (64 bit). */
828 static const struct aarch64_register x0
= { 0, 1 };
829 static const struct aarch64_register x1
= { 1, 1 };
830 static const struct aarch64_register x2
= { 2, 1 };
831 static const struct aarch64_register x3
= { 3, 1 };
832 static const struct aarch64_register x4
= { 4, 1 };
834 /* General purpose scratch registers (32 bit). */
835 static const struct aarch64_register w0
= { 0, 0 };
836 static const struct aarch64_register w2
= { 2, 0 };
838 /* Intra-procedure scratch registers. */
839 static const struct aarch64_register ip0
= { 16, 1 };
841 /* Special purpose registers. */
842 static const struct aarch64_register fp
= { 29, 1 };
843 static const struct aarch64_register lr
= { 30, 1 };
844 static const struct aarch64_register sp
= { 31, 1 };
845 static const struct aarch64_register xzr
= { 31, 1 };
847 /* Dynamically allocate a new register. If we know the register
848 statically, we should make it a global as above instead of using this
851 static struct aarch64_register
852 aarch64_register (unsigned num
, int is64
)
854 return (struct aarch64_register
) { num
, is64
};
857 /* Helper function to create a register operand, for instructions with
858 different types of operands.
861 p += emit_mov (p, x0, register_operand (x1)); */
863 static struct aarch64_operand
864 register_operand (struct aarch64_register reg
)
866 struct aarch64_operand operand
;
868 operand
.type
= OPERAND_REGISTER
;
874 /* Helper function to create an immediate operand, for instructions with
875 different types of operands.
878 p += emit_mov (p, x0, immediate_operand (12)); */
880 static struct aarch64_operand
881 immediate_operand (uint32_t imm
)
883 struct aarch64_operand operand
;
885 operand
.type
= OPERAND_IMMEDIATE
;
891 /* Helper function to create an offset memory operand.
894 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
896 static struct aarch64_memory_operand
897 offset_memory_operand (int32_t offset
)
899 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
902 /* Helper function to create a pre-index memory operand.
905 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
907 static struct aarch64_memory_operand
908 preindex_memory_operand (int32_t index
)
910 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
913 /* Helper function to create a post-index memory operand.
916 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
918 static struct aarch64_memory_operand
919 postindex_memory_operand (int32_t index
)
921 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
924 /* System control registers. These special registers can be written and
925 read with the MRS and MSR instructions.
927 - NZCV: Condition flags. GDB refers to this register under the CPSR
929 - FPSR: Floating-point status register.
930 - FPCR: Floating-point control registers.
931 - TPIDR_EL0: Software thread ID register. */
933 enum aarch64_system_control_registers
935 /* op0 op1 crn crm op2 */
936 NZCV
= (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
937 FPSR
= (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
938 FPCR
= (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
939 TPIDR_EL0
= (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
942 /* Write a BLR instruction into *BUF.
946 RN is the register to branch to. */
949 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
951 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
954 /* Write a RET instruction into *BUF.
958 RN is the register to branch to. */
961 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
963 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
967 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
968 struct aarch64_register rt
,
969 struct aarch64_register rt2
,
970 struct aarch64_register rn
,
971 struct aarch64_memory_operand operand
)
978 opc
= ENCODE (2, 2, 30);
980 opc
= ENCODE (0, 2, 30);
982 switch (operand
.type
)
984 case MEMORY_OPERAND_OFFSET
:
986 pre_index
= ENCODE (1, 1, 24);
987 write_back
= ENCODE (0, 1, 23);
990 case MEMORY_OPERAND_POSTINDEX
:
992 pre_index
= ENCODE (0, 1, 24);
993 write_back
= ENCODE (1, 1, 23);
996 case MEMORY_OPERAND_PREINDEX
:
998 pre_index
= ENCODE (1, 1, 24);
999 write_back
= ENCODE (1, 1, 23);
1006 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
1007 | ENCODE (operand
.index
>> 3, 7, 15)
1008 | ENCODE (rt2
.num
, 5, 10)
1009 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1012 /* Write a STP instruction into *BUF.
1014 STP rt, rt2, [rn, #offset]
1015 STP rt, rt2, [rn, #index]!
1016 STP rt, rt2, [rn], #index
1018 RT and RT2 are the registers to store.
1019 RN is the base address register.
1020 OFFSET is the immediate to add to the base address. It is limited to a
1021 -512 .. 504 range (7 bits << 3). */
1024 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
1025 struct aarch64_register rt2
, struct aarch64_register rn
,
1026 struct aarch64_memory_operand operand
)
1028 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
1031 /* Write a LDP instruction into *BUF.
1033 LDP rt, rt2, [rn, #offset]
1034 LDP rt, rt2, [rn, #index]!
1035 LDP rt, rt2, [rn], #index
1037 RT and RT2 are the registers to store.
1038 RN is the base address register.
1039 OFFSET is the immediate to add to the base address. It is limited to a
1040 -512 .. 504 range (7 bits << 3). */
1043 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
1044 struct aarch64_register rt2
, struct aarch64_register rn
,
1045 struct aarch64_memory_operand operand
)
1047 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
1050 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1052 LDP qt, qt2, [rn, #offset]
1054 RT and RT2 are the Q registers to store.
1055 RN is the base address register.
1056 OFFSET is the immediate to add to the base address. It is limited to
1057 -1024 .. 1008 range (7 bits << 4). */
1060 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1061 struct aarch64_register rn
, int32_t offset
)
1063 uint32_t opc
= ENCODE (2, 2, 30);
1064 uint32_t pre_index
= ENCODE (1, 1, 24);
1066 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1067 | ENCODE (offset
>> 4, 7, 15)
1068 | ENCODE (rt2
, 5, 10)
1069 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1072 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1074 STP qt, qt2, [rn, #offset]
1076 RT and RT2 are the Q registers to store.
1077 RN is the base address register.
1078 OFFSET is the immediate to add to the base address. It is limited to
1079 -1024 .. 1008 range (7 bits << 4). */
1082 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1083 struct aarch64_register rn
, int32_t offset
)
1085 uint32_t opc
= ENCODE (2, 2, 30);
1086 uint32_t pre_index
= ENCODE (1, 1, 24);
1088 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1089 | ENCODE (offset
>> 4, 7, 15)
1090 | ENCODE (rt2
, 5, 10)
1091 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1094 /* Write a LDRH instruction into *BUF.
1096 LDRH wt, [xn, #offset]
1097 LDRH wt, [xn, #index]!
1098 LDRH wt, [xn], #index
1100 RT is the register to store.
1101 RN is the base address register.
1102 OFFSET is the immediate to add to the base address. It is limited to
1103 0 .. 32760 range (12 bits << 3). */
1106 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1107 struct aarch64_register rn
,
1108 struct aarch64_memory_operand operand
)
1110 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1113 /* Write a LDRB instruction into *BUF.
1115 LDRB wt, [xn, #offset]
1116 LDRB wt, [xn, #index]!
1117 LDRB wt, [xn], #index
1119 RT is the register to store.
1120 RN is the base address register.
1121 OFFSET is the immediate to add to the base address. It is limited to
1122 0 .. 32760 range (12 bits << 3). */
1125 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1126 struct aarch64_register rn
,
1127 struct aarch64_memory_operand operand
)
1129 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1134 /* Write a STR instruction into *BUF.
1136 STR rt, [rn, #offset]
1137 STR rt, [rn, #index]!
1138 STR rt, [rn], #index
1140 RT is the register to store.
1141 RN is the base address register.
1142 OFFSET is the immediate to add to the base address. It is limited to
1143 0 .. 32760 range (12 bits << 3). */
1146 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1147 struct aarch64_register rn
,
1148 struct aarch64_memory_operand operand
)
1150 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1153 /* Helper function emitting an exclusive load or store instruction. */
1156 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1157 enum aarch64_opcodes opcode
,
1158 struct aarch64_register rs
,
1159 struct aarch64_register rt
,
1160 struct aarch64_register rt2
,
1161 struct aarch64_register rn
)
1163 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1164 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1165 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1168 /* Write a LAXR instruction into *BUF.
1172 RT is the destination register.
1173 RN is the base address register. */
1176 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1177 struct aarch64_register rn
)
1179 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1183 /* Write a STXR instruction into *BUF.
1187 RS is the result register, it indicates if the store succeeded or not.
1188 RT is the destination register.
1189 RN is the base address register. */
1192 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1193 struct aarch64_register rt
, struct aarch64_register rn
)
1195 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1199 /* Write a STLR instruction into *BUF.
1203 RT is the register to store.
1204 RN is the base address register. */
1207 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1208 struct aarch64_register rn
)
1210 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1214 /* Helper function for data processing instructions with register sources. */
1217 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1218 struct aarch64_register rd
,
1219 struct aarch64_register rn
,
1220 struct aarch64_register rm
)
1222 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1224 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1225 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
/* Helper function for data processing instructions taking either a register
   or an immediate as their second operand.

   Selects the immediate-class or register-class encoding of OPCODE based
   on OPERAND.type, then emits the instruction into *BUF.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  /* sf bit: 1 selects the 64-bit form of the instruction.  */
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* Immediate class: xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      /* The 12-bit immediate occupies bits [21:10].  */
      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* Register class: xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand (immediate or register) to subtract
   from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      /* MOV (wide immediate): the 16-bit immediate sits at bits [20:5].  */
      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    /* Register move is emitted as ADD rd, rm, #0, which also accepts the
       stack pointer as a source (unlike ORR-based MOV).  */
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   Keeps the other bits of RD and inserts IMM at the 16-bit field selected
   by SHIFT.

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM, in units of 16 bits
   (0 to 3).  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48

   Higher MOVKs are skipped as soon as the remaining bits of ADDR are all
   zero, so the sequence is 1 to 4 instructions long.  Returns the number
   of instructions emitted.  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register, so the low 16 bits can go in unconditionally.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN is the input register; OPERAND is the (immediate or register)
   operand subtracted from it.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm: it sets the condition
   flags and discards the arithmetic result.

   RN is the register to compare; OPERAND is the immediate or register it
   is compared against.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR (bitwise inclusive or) instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN (bitwise inclusive or with complement) instruction
   into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers; RM is complemented.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR (bitwise exclusive or) instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN (bitwise not) instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV (logical shift left, variable) instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN holds the value to shift; RM holds the shift amount.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV (logical shift right, variable) instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN holds the value to shift; RM holds the shift amount.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV (arithmetic shift right, variable) instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN holds the value to shift; RM holds the shift amount.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  /* The system register identifier occupies bits [19:5].  */
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  /* The system register identifier occupies bits [19:5].  */
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event
   local to this core.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an
   event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N field must match the sf bit for the valid 32/64-bit forms.  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N field must match the sf bit for the valid 32/64-bit forms.  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  /* The condition occupies bits [15:12].  */
  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
/* Write LEN instructions from BUF into the inferior memory at *TO,
   advancing *TO past the written bytes.

   Note instructions are always little endian on AArch64, unlike data, so
   on a big-endian host each word is byte-swapped before being written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  /* Convert each instruction word to little endian in a scratch buffer
     before writing it to the inferior.  */
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".

   Re-emit an unconditional branch (B or BL) with its offset adjusted for
   the new location.  If the adjusted offset does not fit in the 28-bit
   branch range, no instruction is emitted; the caller detects this by
   seeing that INSN_PTR did not advance.  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Rebase the branch target from the original location to the copy.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".

   Re-emit a conditional branch with its offset adjusted for the new
   location, falling back to a B.cond/B/B trampoline when the adjusted
   offset exceeds the 21-bit conditional-branch range but still fits in
   the 28-bit unconditional range.  If neither fits, nothing is emitted.  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Rebase the branch target from the original location to the copy.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      /* new_offset is relative to the original branch; subtract the 8
	 bytes of the two instructions already emitted before it.  */
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".

   Re-emit a compare-and-branch (CBZ/CBNZ) with its offset adjusted for
   the new location, falling back to a CB/B/B trampoline when the adjusted
   offset exceeds the 21-bit range but fits in 28 bits.  If neither fits,
   nothing is emitted.  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Rebase the branch target from the original location to the copy.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      /* Account for the 8 bytes of trampoline emitted before this
	 branch.  */
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".

   Re-emit a test-bit-and-branch (TBZ/TBNZ) with its offset adjusted for
   the new location, falling back to a TB/B/B trampoline when the adjusted
   offset exceeds the 16-bit range but fits in 28 bits.  If neither fits,
   nothing is emitted.  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Rebase the branch target from the original location to the copy.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      /* Account for the 8 bytes of trampoline emitted before this
	 branch.  */
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP compute a PC-relative address; since both the original PC and
   the offset are known at relocation time, the final address is computed
   here and materialized with a MOV/MOVK sequence instead.  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1),
					   address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load is replaced by materializing the absolute
   literal address into RT and then loading through RT, since the copy may
   be arbitrarily far from the original literal pool.  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    /* LDRSW loads a 32-bit word and sign-extends it into the 64-bit RT.  */
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
/* Visitor dispatch table used by aarch64_relocate_instruction; the entry
   order must match struct aarch64_insn_visitor (arch/aarch64-insn.h).  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of target op method "supports_fast_tracepoints".  */

bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}
/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".

   Builds the jump pad at *JUMP_ENTRY: save full register state, take the
   collector spin lock, call COLLECTOR, release the lock, restore state,
   execute the relocated original instruction and branch back.  Returns 0
   on success, 1 with a message in ERR on failure.  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP (its value before any of the pushes above):

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr, advancing current_insn_ptr past them.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instrucions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  One 16-byte stack cell per register;
   the stack pointer is post-incremented.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack.  One 16-byte stack cell per register;
   the stack pointer is pre-decremented.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emit a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able refer to value and regs later, we save
     the current stack pointer in the frame pointer.  This way, it is not
     clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  The value
     pointer saved by the prologue sits one cell below the frame
     pointer.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state: unwind the stack to the frame pointer
     and reload the saved FP/LR pair.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
2468 /* Implementation of emit_ops method "emit_add". */
2471 aarch64_emit_add (void)
2476 p
+= emit_pop (p
, x1
);
2477 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2479 emit_ops_insns (buf
, p
- buf
);
2482 /* Implementation of emit_ops method "emit_sub". */
2485 aarch64_emit_sub (void)
2490 p
+= emit_pop (p
, x1
);
2491 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2493 emit_ops_insns (buf
, p
- buf
);
2496 /* Implementation of emit_ops method "emit_mul". */
2499 aarch64_emit_mul (void)
2504 p
+= emit_pop (p
, x1
);
2505 p
+= emit_mul (p
, x0
, x1
, x0
);
2507 emit_ops_insns (buf
, p
- buf
);
2510 /* Implementation of emit_ops method "emit_lsh". */
2513 aarch64_emit_lsh (void)
2518 p
+= emit_pop (p
, x1
);
2519 p
+= emit_lslv (p
, x0
, x1
, x0
);
2521 emit_ops_insns (buf
, p
- buf
);
2524 /* Implementation of emit_ops method "emit_rsh_signed". */
2527 aarch64_emit_rsh_signed (void)
2532 p
+= emit_pop (p
, x1
);
2533 p
+= emit_asrv (p
, x0
, x1
, x0
);
2535 emit_ops_insns (buf
, p
- buf
);
2538 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2541 aarch64_emit_rsh_unsigned (void)
2546 p
+= emit_pop (p
, x1
);
2547 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2549 emit_ops_insns (buf
, p
- buf
);
2552 /* Implementation of emit_ops method "emit_ext". */
2555 aarch64_emit_ext (int arg
)
2560 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2562 emit_ops_insns (buf
, p
- buf
);
2565 /* Implementation of emit_ops method "emit_log_not". */
2568 aarch64_emit_log_not (void)
2573 /* If the top of the stack is 0, replace it with 1. Else replace it with
2576 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2577 p
+= emit_cset (p
, x0
, EQ
);
2579 emit_ops_insns (buf
, p
- buf
);
2582 /* Implementation of emit_ops method "emit_bit_and". */
2585 aarch64_emit_bit_and (void)
2590 p
+= emit_pop (p
, x1
);
2591 p
+= emit_and (p
, x0
, x0
, x1
);
2593 emit_ops_insns (buf
, p
- buf
);
2596 /* Implementation of emit_ops method "emit_bit_or". */
2599 aarch64_emit_bit_or (void)
2604 p
+= emit_pop (p
, x1
);
2605 p
+= emit_orr (p
, x0
, x0
, x1
);
2607 emit_ops_insns (buf
, p
- buf
);
2610 /* Implementation of emit_ops method "emit_bit_xor". */
2613 aarch64_emit_bit_xor (void)
2618 p
+= emit_pop (p
, x1
);
2619 p
+= emit_eor (p
, x0
, x0
, x1
);
2621 emit_ops_insns (buf
, p
- buf
);
2624 /* Implementation of emit_ops method "emit_bit_not". */
2627 aarch64_emit_bit_not (void)
2632 p
+= emit_mvn (p
, x0
, x0
);
2634 emit_ops_insns (buf
, p
- buf
);
2637 /* Implementation of emit_ops method "emit_equal". */
2640 aarch64_emit_equal (void)
2645 p
+= emit_pop (p
, x1
);
2646 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2647 p
+= emit_cset (p
, x0
, EQ
);
2649 emit_ops_insns (buf
, p
- buf
);
2652 /* Implementation of emit_ops method "emit_less_signed". */
2655 aarch64_emit_less_signed (void)
2660 p
+= emit_pop (p
, x1
);
2661 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2662 p
+= emit_cset (p
, x0
, LT
);
2664 emit_ops_insns (buf
, p
- buf
);
2667 /* Implementation of emit_ops method "emit_less_unsigned". */
2670 aarch64_emit_less_unsigned (void)
2675 p
+= emit_pop (p
, x1
);
2676 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2677 p
+= emit_cset (p
, x0
, LO
);
2679 emit_ops_insns (buf
, p
- buf
);
2682 /* Implementation of emit_ops method "emit_ref". */
2685 aarch64_emit_ref (int size
)
2693 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2696 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2699 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2702 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2705 /* Unknown size, bail on compilation. */
2710 emit_ops_insns (buf
, p
- buf
);
2713 /* Implementation of emit_ops method "emit_if_goto". */
2716 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2721 /* The Z flag is set or cleared here. */
2722 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2723 /* This instruction must not change the Z flag. */
2724 p
+= emit_pop (p
, x0
);
2725 /* Branch over the next instruction if x0 == 0. */
2726 p
+= emit_bcond (p
, EQ
, 8);
2728 /* The NOP instruction will be patched with an unconditional branch. */
2730 *offset_p
= (p
- buf
) * 4;
2735 emit_ops_insns (buf
, p
- buf
);
2738 /* Implementation of emit_ops method "emit_goto". */
2741 aarch64_emit_goto (int *offset_p
, int *size_p
)
2746 /* The NOP instruction will be patched with an unconditional branch. */
2753 emit_ops_insns (buf
, p
- buf
);
2756 /* Implementation of emit_ops method "write_goto_address". */
2759 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2763 emit_b (&insn
, 0, to
- from
);
2764 append_insns (&from
, 1, &insn
);
2767 /* Implementation of emit_ops method "emit_const". */
2770 aarch64_emit_const (LONGEST num
)
2775 p
+= emit_mov_addr (p
, x0
, num
);
2777 emit_ops_insns (buf
, p
- buf
);
2780 /* Implementation of emit_ops method "emit_call". */
2783 aarch64_emit_call (CORE_ADDR fn
)
2788 p
+= emit_mov_addr (p
, ip0
, fn
);
2789 p
+= emit_blr (p
, ip0
);
2791 emit_ops_insns (buf
, p
- buf
);
2794 /* Implementation of emit_ops method "emit_reg". */
2797 aarch64_emit_reg (int reg
)
2802 /* Set x0 to unsigned char *regs. */
2803 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2804 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2805 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2807 emit_ops_insns (buf
, p
- buf
);
2809 aarch64_emit_call (get_raw_reg_func_addr ());
2812 /* Implementation of emit_ops method "emit_pop". */
2815 aarch64_emit_pop (void)
2820 p
+= emit_pop (p
, x0
);
2822 emit_ops_insns (buf
, p
- buf
);
2825 /* Implementation of emit_ops method "emit_stack_flush". */
2828 aarch64_emit_stack_flush (void)
2833 p
+= emit_push (p
, x0
);
2835 emit_ops_insns (buf
, p
- buf
);
2838 /* Implementation of emit_ops method "emit_zero_ext". */
2841 aarch64_emit_zero_ext (int arg
)
2846 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2848 emit_ops_insns (buf
, p
- buf
);
2851 /* Implementation of emit_ops method "emit_swap". */
2854 aarch64_emit_swap (void)
2859 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2860 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2861 p
+= emit_mov (p
, x0
, register_operand (x1
));
2863 emit_ops_insns (buf
, p
- buf
);
2866 /* Implementation of emit_ops method "emit_stack_adjust". */
2869 aarch64_emit_stack_adjust (int n
)
2871 /* This is not needed with our design. */
2875 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2877 emit_ops_insns (buf
, p
- buf
);
2880 /* Implementation of emit_ops method "emit_int_call_1". */
2883 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2888 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2890 emit_ops_insns (buf
, p
- buf
);
2892 aarch64_emit_call (fn
);
2895 /* Implementation of emit_ops method "emit_void_call_2". */
2898 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2903 /* Push x0 on the stack. */
2904 aarch64_emit_stack_flush ();
2906 /* Setup arguments for the function call:
2909 x1: top of the stack
2914 p
+= emit_mov (p
, x1
, register_operand (x0
));
2915 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2917 emit_ops_insns (buf
, p
- buf
);
2919 aarch64_emit_call (fn
);
2922 aarch64_emit_pop ();
2925 /* Implementation of emit_ops method "emit_eq_goto". */
2928 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2933 p
+= emit_pop (p
, x1
);
2934 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2935 /* Branch over the next instruction if x0 != x1. */
2936 p
+= emit_bcond (p
, NE
, 8);
2937 /* The NOP instruction will be patched with an unconditional branch. */
2939 *offset_p
= (p
- buf
) * 4;
2944 emit_ops_insns (buf
, p
- buf
);
2947 /* Implementation of emit_ops method "emit_ne_goto". */
2950 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2955 p
+= emit_pop (p
, x1
);
2956 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2957 /* Branch over the next instruction if x0 == x1. */
2958 p
+= emit_bcond (p
, EQ
, 8);
2959 /* The NOP instruction will be patched with an unconditional branch. */
2961 *offset_p
= (p
- buf
) * 4;
2966 emit_ops_insns (buf
, p
- buf
);
2969 /* Implementation of emit_ops method "emit_lt_goto". */
2972 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2977 p
+= emit_pop (p
, x1
);
2978 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2979 /* Branch over the next instruction if x0 >= x1. */
2980 p
+= emit_bcond (p
, GE
, 8);
2981 /* The NOP instruction will be patched with an unconditional branch. */
2983 *offset_p
= (p
- buf
) * 4;
2988 emit_ops_insns (buf
, p
- buf
);
2991 /* Implementation of emit_ops method "emit_le_goto". */
2994 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2999 p
+= emit_pop (p
, x1
);
3000 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3001 /* Branch over the next instruction if x0 > x1. */
3002 p
+= emit_bcond (p
, GT
, 8);
3003 /* The NOP instruction will be patched with an unconditional branch. */
3005 *offset_p
= (p
- buf
) * 4;
3010 emit_ops_insns (buf
, p
- buf
);
3013 /* Implementation of emit_ops method "emit_gt_goto". */
3016 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
3021 p
+= emit_pop (p
, x1
);
3022 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3023 /* Branch over the next instruction if x0 <= x1. */
3024 p
+= emit_bcond (p
, LE
, 8);
3025 /* The NOP instruction will be patched with an unconditional branch. */
3027 *offset_p
= (p
- buf
) * 4;
3032 emit_ops_insns (buf
, p
- buf
);
3035 /* Implementation of emit_ops method "emit_ge_got". */
3038 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
3043 p
+= emit_pop (p
, x1
);
3044 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3045 /* Branch over the next instruction if x0 <= x1. */
3046 p
+= emit_bcond (p
, LT
, 8);
3047 /* The NOP instruction will be patched with an unconditional branch. */
3049 *offset_p
= (p
- buf
) * 4;
3054 emit_ops_insns (buf
, p
- buf
);
3057 static struct emit_ops aarch64_emit_ops_impl
=
3059 aarch64_emit_prologue
,
3060 aarch64_emit_epilogue
,
3065 aarch64_emit_rsh_signed
,
3066 aarch64_emit_rsh_unsigned
,
3068 aarch64_emit_log_not
,
3069 aarch64_emit_bit_and
,
3070 aarch64_emit_bit_or
,
3071 aarch64_emit_bit_xor
,
3072 aarch64_emit_bit_not
,
3074 aarch64_emit_less_signed
,
3075 aarch64_emit_less_unsigned
,
3077 aarch64_emit_if_goto
,
3079 aarch64_write_goto_address
,
3084 aarch64_emit_stack_flush
,
3085 aarch64_emit_zero_ext
,
3087 aarch64_emit_stack_adjust
,
3088 aarch64_emit_int_call_1
,
3089 aarch64_emit_void_call_2
,
3090 aarch64_emit_eq_goto
,
3091 aarch64_emit_ne_goto
,
3092 aarch64_emit_lt_goto
,
3093 aarch64_emit_le_goto
,
3094 aarch64_emit_gt_goto
,
3095 aarch64_emit_ge_got
,
3098 /* Implementation of target ops method "emit_ops". */
3101 aarch64_target::emit_ops ()
3103 return &aarch64_emit_ops_impl
;
3106 /* Implementation of target ops method
3107 "get_min_fast_tracepoint_insn_len". */
3110 aarch64_target::get_min_fast_tracepoint_insn_len ()
3115 /* Implementation of linux target ops method "low_supports_range_stepping". */
3118 aarch64_target::low_supports_range_stepping ()
3123 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3126 aarch64_target::sw_breakpoint_from_kind (int kind
, int *size
)
3128 if (is_64bit_tdesc ())
3130 *size
= aarch64_breakpoint_len
;
3131 return aarch64_breakpoint
;
3134 return arm_sw_breakpoint_from_kind (kind
, size
);
3137 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3140 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3142 if (is_64bit_tdesc ())
3143 return aarch64_breakpoint_len
;
3145 return arm_breakpoint_kind_from_pc (pcptr
);
3148 /* Implementation of the target ops method
3149 "breakpoint_kind_from_current_state". */
3152 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3154 if (is_64bit_tdesc ())
3155 return aarch64_breakpoint_len
;
3157 return arm_breakpoint_kind_from_current_state (pcptr
);
3160 struct linux_target_ops the_low_target
=
3162 aarch64_get_syscall_trapinfo
,
3165 /* The linux target ops object. */
3167 linux_process_target
*the_linux_target
= &the_aarch64_target
;
3170 initialize_low_arch (void)
3172 initialize_low_arch_aarch32 ();
3174 initialize_regsets_info (&aarch64_regsets_info
);
3175 initialize_regsets_info (&aarch64_sve_regsets_info
);