1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
60 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
62 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
64 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
66 bool supports_z_point_type (char z_type
) override
;
68 bool supports_tracepoints () override
;
72 void low_arch_setup () override
;
74 bool low_cannot_fetch_register (int regno
) override
;
76 bool low_cannot_store_register (int regno
) override
;
78 bool low_supports_breakpoints () override
;
80 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
82 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
84 bool low_breakpoint_at (CORE_ADDR pc
) override
;
86 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
87 int size
, raw_breakpoint
*bp
) override
;
89 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
90 int size
, raw_breakpoint
*bp
) override
;
92 bool low_stopped_by_watchpoint () override
;
94 CORE_ADDR
low_stopped_data_address () override
;
96 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
97 int direction
) override
;
99 arch_process_info
*low_new_process () override
;
101 void low_delete_process (arch_process_info
*info
) override
;
103 void low_new_thread (lwp_info
*) override
;
105 void low_delete_thread (arch_lwp_info
*) override
;
107 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
109 void low_prepare_to_resume (lwp_info
*lwp
) override
;
111 int low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
) override
;
114 /* The singleton target ops object. */
116 static aarch64_target the_aarch64_target
;
119 aarch64_target::low_cannot_fetch_register (int regno
)
121 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
122 "is not implemented by the target");
126 aarch64_target::low_cannot_store_register (int regno
)
128 gdb_assert_not_reached ("linux target op low_cannot_store_register "
129 "is not implemented by the target");
133 aarch64_target::low_prepare_to_resume (lwp_info
*lwp
)
135 aarch64_linux_prepare_to_resume (lwp
);
138 /* Per-process arch-specific data we want to keep. */
140 struct arch_process_info
142 /* Hardware breakpoint/watchpoint data.
143 The reason for them to be per-process rather than per-thread is
144 due to the lack of information in the gdbserver environment;
145 gdbserver is not told that whether a requested hardware
146 breakpoint/watchpoint is thread specific or not, so it has to set
147 each hw bp/wp for every thread in the current process. The
148 higher level bp/wp management in gdb will resume a thread if a hw
149 bp/wp trap is not expected for it. Since the hw bp/wp setting is
150 same for each thread, it is reasonable for the data to live here.
152 struct aarch64_debug_reg_state debug_reg_state
;
155 /* Return true if the size of register 0 is 8 byte. */
158 is_64bit_tdesc (void)
160 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
162 return register_size (regcache
->tdesc
, 0) == 8;
165 /* Return true if the regcache contains the number of SVE registers. */
170 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
172 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
176 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
178 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
181 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
182 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
183 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
184 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
185 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
189 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
191 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
194 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
195 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
196 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
197 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
198 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
202 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
204 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
207 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
208 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
209 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
210 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
214 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
216 const struct user_fpsimd_state
*regset
217 = (const struct user_fpsimd_state
*) buf
;
220 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
221 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
222 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
223 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
226 /* Store the pauth registers to regcache. */
229 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
231 uint64_t *pauth_regset
= (uint64_t *) buf
;
232 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
237 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
239 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
244 aarch64_target::low_supports_breakpoints ()
249 /* Implementation of linux target ops method "low_get_pc". */
252 aarch64_target::low_get_pc (regcache
*regcache
)
254 if (register_size (regcache
->tdesc
, 0) == 8)
255 return linux_get_pc_64bit (regcache
);
257 return linux_get_pc_32bit (regcache
);
260 /* Implementation of linux target ops method "low_set_pc". */
263 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
265 if (register_size (regcache
->tdesc
, 0) == 8)
266 linux_set_pc_64bit (regcache
, pc
);
268 linux_set_pc_32bit (regcache
, pc
);
271 #define aarch64_breakpoint_len 4
273 /* AArch64 BRK software debug mode instruction.
274 This instruction needs to match gdb/aarch64-tdep.c
275 (aarch64_default_breakpoint). */
276 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
278 /* Implementation of linux target ops method "low_breakpoint_at". */
281 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
283 if (is_64bit_tdesc ())
285 gdb_byte insn
[aarch64_breakpoint_len
];
287 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
288 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
294 return arm_breakpoint_at (where
);
298 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
302 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
304 state
->dr_addr_bp
[i
] = 0;
305 state
->dr_ctrl_bp
[i
] = 0;
306 state
->dr_ref_count_bp
[i
] = 0;
309 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
311 state
->dr_addr_wp
[i
] = 0;
312 state
->dr_ctrl_wp
[i
] = 0;
313 state
->dr_ref_count_wp
[i
] = 0;
317 /* Return the pointer to the debug register state structure in the
318 current process' arch-specific data area. */
320 struct aarch64_debug_reg_state
*
321 aarch64_get_debug_reg_state (pid_t pid
)
323 struct process_info
*proc
= find_process_pid (pid
);
325 return &proc
->priv
->arch_private
->debug_reg_state
;
328 /* Implementation of target ops method "supports_z_point_type". */
331 aarch64_target::supports_z_point_type (char z_type
)
337 case Z_PACKET_WRITE_WP
:
338 case Z_PACKET_READ_WP
:
339 case Z_PACKET_ACCESS_WP
:
346 /* Implementation of linux target ops method "low_insert_point".
348 It actually only records the info of the to-be-inserted bp/wp;
349 the actual insertion will happen when threads are resumed. */
352 aarch64_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
353 int len
, raw_breakpoint
*bp
)
356 enum target_hw_bp_type targ_type
;
357 struct aarch64_debug_reg_state
*state
358 = aarch64_get_debug_reg_state (pid_of (current_thread
));
361 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
362 (unsigned long) addr
, len
);
364 /* Determine the type from the raw breakpoint type. */
365 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
367 if (targ_type
!= hw_execute
)
369 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
370 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
371 1 /* is_insert */, state
);
379 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
380 instruction. Set it to 2 to correctly encode length bit
381 mask in hardware/watchpoint control register. */
384 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
385 1 /* is_insert */, state
);
389 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
395 /* Implementation of linux target ops method "low_remove_point".
397 It actually only records the info of the to-be-removed bp/wp,
398 the actual removal will be done when threads are resumed. */
401 aarch64_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
402 int len
, raw_breakpoint
*bp
)
405 enum target_hw_bp_type targ_type
;
406 struct aarch64_debug_reg_state
*state
407 = aarch64_get_debug_reg_state (pid_of (current_thread
));
410 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
411 (unsigned long) addr
, len
);
413 /* Determine the type from the raw breakpoint type. */
414 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
416 /* Set up state pointers. */
417 if (targ_type
!= hw_execute
)
419 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
425 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
426 instruction. Set it to 2 to correctly encode length bit
427 mask in hardware/watchpoint control register. */
430 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
431 0 /* is_insert */, state
);
435 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
441 /* Implementation of linux target ops method "low_stopped_data_address". */
444 aarch64_target::low_stopped_data_address ()
448 struct aarch64_debug_reg_state
*state
;
450 pid
= lwpid_of (current_thread
);
452 /* Get the siginfo. */
453 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
454 return (CORE_ADDR
) 0;
456 /* Need to be a hardware breakpoint/watchpoint trap. */
457 if (siginfo
.si_signo
!= SIGTRAP
458 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
459 return (CORE_ADDR
) 0;
461 /* Check if the address matches any watched address. */
462 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
463 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
465 const unsigned int offset
466 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
467 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
468 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
469 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
470 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
471 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
473 if (state
->dr_ref_count_wp
[i
]
474 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
475 && addr_trap
>= addr_watch_aligned
476 && addr_trap
< addr_watch
+ len
)
478 /* ADDR_TRAP reports the first address of the memory range
479 accessed by the CPU, regardless of what was the memory
480 range watched. Thus, a large CPU access that straddles
481 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
482 ADDR_TRAP that is lower than the
483 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
485 addr: | 4 | 5 | 6 | 7 | 8 |
486 |---- range watched ----|
487 |----------- range accessed ------------|
489 In this case, ADDR_TRAP will be 4.
491 To match a watchpoint known to GDB core, we must never
492 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
493 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
494 positive on kernels older than 4.10. See PR
500 return (CORE_ADDR
) 0;
503 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
506 aarch64_target::low_stopped_by_watchpoint ()
508 return (low_stopped_data_address () != 0);
511 /* Fetch the thread-local storage pointer for libthread_db. */
514 ps_get_thread_area (struct ps_prochandle
*ph
,
515 lwpid_t lwpid
, int idx
, void **base
)
517 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
521 /* Implementation of linux target ops method "low_siginfo_fixup". */
524 aarch64_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
527 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
528 if (!is_64bit_tdesc ())
531 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
534 aarch64_siginfo_from_compat_siginfo (native
,
535 (struct compat_siginfo
*) inf
);
543 /* Implementation of linux target ops method "low_new_process". */
546 aarch64_target::low_new_process ()
548 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
550 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
555 /* Implementation of linux target ops method "low_delete_process". */
558 aarch64_target::low_delete_process (arch_process_info
*info
)
564 aarch64_target::low_new_thread (lwp_info
*lwp
)
566 aarch64_linux_new_thread (lwp
);
570 aarch64_target::low_delete_thread (arch_lwp_info
*arch_lwp
)
572 aarch64_linux_delete_thread (arch_lwp
);
575 /* Implementation of linux target ops method "low_new_fork". */
578 aarch64_target::low_new_fork (process_info
*parent
,
581 /* These are allocated by linux_add_process. */
582 gdb_assert (parent
->priv
!= NULL
583 && parent
->priv
->arch_private
!= NULL
);
584 gdb_assert (child
->priv
!= NULL
585 && child
->priv
->arch_private
!= NULL
);
587 /* Linux kernel before 2.6.33 commit
588 72f674d203cd230426437cdcf7dd6f681dad8b0d
589 will inherit hardware debug registers from parent
590 on fork/vfork/clone. Newer Linux kernels create such tasks with
591 zeroed debug registers.
593 GDB core assumes the child inherits the watchpoints/hw
594 breakpoints of the parent, and will remove them all from the
595 forked off process. Copy the debug registers mirrors into the
596 new process so that all breakpoints and watchpoints can be
597 removed together. The debug registers mirror will become zeroed
598 in the end before detaching the forked off process, thus making
599 this compatible with older Linux kernels too. */
601 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
604 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
605 #define AARCH64_HWCAP_PACA (1 << 30)
607 /* Implementation of linux target ops method "low_arch_setup". */
610 aarch64_target::low_arch_setup ()
612 unsigned int machine
;
616 tid
= lwpid_of (current_thread
);
618 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
622 uint64_t vq
= aarch64_sve_get_vq (tid
);
623 unsigned long hwcap
= linux_get_hwcap (8);
624 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
626 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
629 current_process ()->tdesc
= aarch32_linux_read_description ();
631 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
634 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
637 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
639 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
642 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
645 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
647 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
650 static struct regset_info aarch64_regsets
[] =
652 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
653 sizeof (struct user_pt_regs
), GENERAL_REGS
,
654 aarch64_fill_gregset
, aarch64_store_gregset
},
655 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
656 sizeof (struct user_fpsimd_state
), FP_REGS
,
657 aarch64_fill_fpregset
, aarch64_store_fpregset
659 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
660 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
661 NULL
, aarch64_store_pauthregset
},
665 static struct regsets_info aarch64_regsets_info
=
667 aarch64_regsets
, /* regsets */
669 NULL
, /* disabled_regsets */
672 static struct regs_info regs_info_aarch64
=
674 NULL
, /* regset_bitmap */
676 &aarch64_regsets_info
,
679 static struct regset_info aarch64_sve_regsets
[] =
681 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
682 sizeof (struct user_pt_regs
), GENERAL_REGS
,
683 aarch64_fill_gregset
, aarch64_store_gregset
},
684 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
685 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
686 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
688 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
689 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
690 NULL
, aarch64_store_pauthregset
},
694 static struct regsets_info aarch64_sve_regsets_info
=
696 aarch64_sve_regsets
, /* regsets. */
697 0, /* num_regsets. */
698 NULL
, /* disabled_regsets. */
701 static struct regs_info regs_info_aarch64_sve
=
703 NULL
, /* regset_bitmap. */
705 &aarch64_sve_regsets_info
,
708 /* Implementation of linux target ops method "get_regs_info". */
711 aarch64_target::get_regs_info ()
713 if (!is_64bit_tdesc ())
714 return ®s_info_aarch32
;
717 return ®s_info_aarch64_sve
;
719 return ®s_info_aarch64
;
722 /* Implementation of target ops method "supports_tracepoints". */
725 aarch64_target::supports_tracepoints ()
727 if (current_thread
== NULL
)
731 /* We don't support tracepoints on aarch32 now. */
732 return is_64bit_tdesc ();
736 /* Implementation of linux target ops method "low_get_thread_area". */
739 aarch64_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
744 iovec
.iov_base
= ®
;
745 iovec
.iov_len
= sizeof (reg
);
747 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
755 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
758 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
760 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
766 collect_register_by_name (regcache
, "x8", &l_sysno
);
767 *sysno
= (int) l_sysno
;
770 collect_register_by_name (regcache
, "r7", sysno
);
773 /* List of condition codes that we need. */
775 enum aarch64_condition_codes
786 enum aarch64_operand_type
792 /* Representation of an operand. At this time, it only supports register
793 and immediate types. */
795 struct aarch64_operand
797 /* Type of the operand. */
798 enum aarch64_operand_type type
;
800 /* Value of the operand according to the type. */
804 struct aarch64_register reg
;
808 /* List of registers that we are currently using, we can add more here as
809 we need to use them. */
811 /* General purpose scratch registers (64 bit). */
812 static const struct aarch64_register x0
= { 0, 1 };
813 static const struct aarch64_register x1
= { 1, 1 };
814 static const struct aarch64_register x2
= { 2, 1 };
815 static const struct aarch64_register x3
= { 3, 1 };
816 static const struct aarch64_register x4
= { 4, 1 };
818 /* General purpose scratch registers (32 bit). */
819 static const struct aarch64_register w0
= { 0, 0 };
820 static const struct aarch64_register w2
= { 2, 0 };
822 /* Intra-procedure scratch registers. */
823 static const struct aarch64_register ip0
= { 16, 1 };
825 /* Special purpose registers. */
826 static const struct aarch64_register fp
= { 29, 1 };
827 static const struct aarch64_register lr
= { 30, 1 };
828 static const struct aarch64_register sp
= { 31, 1 };
829 static const struct aarch64_register xzr
= { 31, 1 };
831 /* Dynamically allocate a new register. If we know the register
832 statically, we should make it a global as above instead of using this
835 static struct aarch64_register
836 aarch64_register (unsigned num
, int is64
)
838 return (struct aarch64_register
) { num
, is64
};
841 /* Helper function to create a register operand, for instructions with
842 different types of operands.
845 p += emit_mov (p, x0, register_operand (x1)); */
847 static struct aarch64_operand
848 register_operand (struct aarch64_register reg
)
850 struct aarch64_operand operand
;
852 operand
.type
= OPERAND_REGISTER
;
858 /* Helper function to create an immediate operand, for instructions with
859 different types of operands.
862 p += emit_mov (p, x0, immediate_operand (12)); */
864 static struct aarch64_operand
865 immediate_operand (uint32_t imm
)
867 struct aarch64_operand operand
;
869 operand
.type
= OPERAND_IMMEDIATE
;
875 /* Helper function to create an offset memory operand.
878 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
880 static struct aarch64_memory_operand
881 offset_memory_operand (int32_t offset
)
883 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
886 /* Helper function to create a pre-index memory operand.
889 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
891 static struct aarch64_memory_operand
892 preindex_memory_operand (int32_t index
)
894 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
897 /* Helper function to create a post-index memory operand.
900 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
902 static struct aarch64_memory_operand
903 postindex_memory_operand (int32_t index
)
905 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
926 /* Write a BLR instruction into *BUF.
930 RN is the register to branch to. */
933 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
935 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
938 /* Write a RET instruction into *BUF.
942 RN is the register to branch to. */
945 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
947 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
951 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
952 struct aarch64_register rt
,
953 struct aarch64_register rt2
,
954 struct aarch64_register rn
,
955 struct aarch64_memory_operand operand
)
962 opc
= ENCODE (2, 2, 30);
964 opc
= ENCODE (0, 2, 30);
966 switch (operand
.type
)
968 case MEMORY_OPERAND_OFFSET
:
970 pre_index
= ENCODE (1, 1, 24);
971 write_back
= ENCODE (0, 1, 23);
974 case MEMORY_OPERAND_POSTINDEX
:
976 pre_index
= ENCODE (0, 1, 24);
977 write_back
= ENCODE (1, 1, 23);
980 case MEMORY_OPERAND_PREINDEX
:
982 pre_index
= ENCODE (1, 1, 24);
983 write_back
= ENCODE (1, 1, 23);
990 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
991 | ENCODE (operand
.index
>> 3, 7, 15)
992 | ENCODE (rt2
.num
, 5, 10)
993 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
996 /* Write a STP instruction into *BUF.
998 STP rt, rt2, [rn, #offset]
999 STP rt, rt2, [rn, #index]!
1000 STP rt, rt2, [rn], #index
1002 RT and RT2 are the registers to store.
1003 RN is the base address register.
1004 OFFSET is the immediate to add to the base address. It is limited to a
1005 -512 .. 504 range (7 bits << 3). */
1008 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
1009 struct aarch64_register rt2
, struct aarch64_register rn
,
1010 struct aarch64_memory_operand operand
)
1012 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
1015 /* Write a LDP instruction into *BUF.
1017 LDP rt, rt2, [rn, #offset]
1018 LDP rt, rt2, [rn, #index]!
1019 LDP rt, rt2, [rn], #index
1021 RT and RT2 are the registers to store.
1022 RN is the base address register.
1023 OFFSET is the immediate to add to the base address. It is limited to a
1024 -512 .. 504 range (7 bits << 3). */
1027 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
1028 struct aarch64_register rt2
, struct aarch64_register rn
,
1029 struct aarch64_memory_operand operand
)
1031 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
1034 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1036 LDP qt, qt2, [rn, #offset]
1038 RT and RT2 are the Q registers to store.
1039 RN is the base address register.
1040 OFFSET is the immediate to add to the base address. It is limited to
1041 -1024 .. 1008 range (7 bits << 4). */
1044 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1045 struct aarch64_register rn
, int32_t offset
)
1047 uint32_t opc
= ENCODE (2, 2, 30);
1048 uint32_t pre_index
= ENCODE (1, 1, 24);
1050 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1051 | ENCODE (offset
>> 4, 7, 15)
1052 | ENCODE (rt2
, 5, 10)
1053 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1056 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1058 STP qt, qt2, [rn, #offset]
1060 RT and RT2 are the Q registers to store.
1061 RN is the base address register.
1062 OFFSET is the immediate to add to the base address. It is limited to
1063 -1024 .. 1008 range (7 bits << 4). */
1066 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1067 struct aarch64_register rn
, int32_t offset
)
1069 uint32_t opc
= ENCODE (2, 2, 30);
1070 uint32_t pre_index
= ENCODE (1, 1, 24);
1072 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1073 | ENCODE (offset
>> 4, 7, 15)
1074 | ENCODE (rt2
, 5, 10)
1075 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1078 /* Write a LDRH instruction into *BUF.
1080 LDRH wt, [xn, #offset]
1081 LDRH wt, [xn, #index]!
1082 LDRH wt, [xn], #index
1084 RT is the register to store.
1085 RN is the base address register.
1086 OFFSET is the immediate to add to the base address. It is limited to
1087 0 .. 32760 range (12 bits << 3). */
1090 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1091 struct aarch64_register rn
,
1092 struct aarch64_memory_operand operand
)
1094 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1097 /* Write a LDRB instruction into *BUF.
1099 LDRB wt, [xn, #offset]
1100 LDRB wt, [xn, #index]!
1101 LDRB wt, [xn], #index
1103 RT is the register to store.
1104 RN is the base address register.
1105 OFFSET is the immediate to add to the base address. It is limited to
1106 0 .. 32760 range (12 bits << 3). */
1109 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1110 struct aarch64_register rn
,
1111 struct aarch64_memory_operand operand
)
1113 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1118 /* Write a STR instruction into *BUF.
1120 STR rt, [rn, #offset]
1121 STR rt, [rn, #index]!
1122 STR rt, [rn], #index
1124 RT is the register to store.
1125 RN is the base address register.
1126 OFFSET is the immediate to add to the base address. It is limited to
1127 0 .. 32760 range (12 bits << 3). */
1130 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1131 struct aarch64_register rn
,
1132 struct aarch64_memory_operand operand
)
1134 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1137 /* Helper function emitting an exclusive load or store instruction. */
1140 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1141 enum aarch64_opcodes opcode
,
1142 struct aarch64_register rs
,
1143 struct aarch64_register rt
,
1144 struct aarch64_register rt2
,
1145 struct aarch64_register rn
)
1147 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1148 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1149 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1152 /* Write a LAXR instruction into *BUF.
1156 RT is the destination register.
1157 RN is the base address register. */
1160 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1161 struct aarch64_register rn
)
1163 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1167 /* Write a STXR instruction into *BUF.
1171 RS is the result register, it indicates if the store succeeded or not.
1172 RT is the destination register.
1173 RN is the base address register. */
1176 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1177 struct aarch64_register rt
, struct aarch64_register rn
)
1179 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1183 /* Write a STLR instruction into *BUF.
1187 RT is the register to store.
1188 RN is the base address register. */
1191 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1192 struct aarch64_register rn
)
1194 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1198 /* Helper function for data processing instructions with register sources. */
1201 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1202 struct aarch64_register rd
,
1203 struct aarch64_register rn
,
1204 struct aarch64_register rm
)
1206 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1208 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1209 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1212 /* Helper function for data processing instructions taking either a register
1216 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1217 struct aarch64_register rd
,
1218 struct aarch64_register rn
,
1219 struct aarch64_operand operand
)
1221 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1222 /* The opcode is different for register and immediate source operands. */
1223 uint32_t operand_opcode
;
1225 if (operand
.type
== OPERAND_IMMEDIATE
)
1227 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1228 operand_opcode
= ENCODE (8, 4, 25);
1230 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1231 | ENCODE (operand
.imm
, 12, 10)
1232 | ENCODE (rn
.num
, 5, 5)
1233 | ENCODE (rd
.num
, 5, 0));
1237 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1238 operand_opcode
= ENCODE (5, 4, 25);
1240 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1245 /* Write an ADD instruction into *BUF.
1250 This function handles both an immediate and register add.
1252 RD is the destination register.
1253 RN is the input register.
1254 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1255 OPERAND_REGISTER. */
1258 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1259 struct aarch64_register rn
, struct aarch64_operand operand
)
1261 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1264 /* Write a SUB instruction into *BUF.
1269 This function handles both an immediate and register sub.
1271 RD is the destination register.
1272 RN is the input register.
1273 IMM is the immediate to substract to RN. */
1276 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1277 struct aarch64_register rn
, struct aarch64_operand operand
)
1279 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1282 /* Write a MOV instruction into *BUF.
1287 This function handles both a wide immediate move and a register move,
1288 with the condition that the source register is not xzr. xzr and the
1289 stack pointer share the same encoding and this function only supports
1292 RD is the destination register.
1293 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1294 OPERAND_REGISTER. */
1297 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1298 struct aarch64_operand operand
)
1300 if (operand
.type
== OPERAND_IMMEDIATE
)
1302 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1303 /* Do not shift the immediate. */
1304 uint32_t shift
= ENCODE (0, 2, 21);
1306 return aarch64_emit_insn (buf
, MOV
| size
| shift
1307 | ENCODE (operand
.imm
, 16, 5)
1308 | ENCODE (rd
.num
, 5, 0));
1311 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1314 /* Write a MOVK instruction into *BUF.
1316 MOVK rd, #imm, lsl #shift
1318 RD is the destination register.
1319 IMM is the immediate.
1320 SHIFT is the logical shift left to apply to IMM. */
1323 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1326 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1328 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1329 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1332 /* Write instructions into *BUF in order to move ADDR into a register.
1333 ADDR can be a 64-bit value.
1335 This function will emit a series of MOV and MOVK instructions, such as:
1338 MOVK xd, #(addr >> 16), lsl #16
1339 MOVK xd, #(addr >> 32), lsl #32
1340 MOVK xd, #(addr >> 48), lsl #48 */
1343 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1347 /* The MOV (wide immediate) instruction clears to top bits of the
1349 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1351 if ((addr
>> 16) != 0)
1352 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1356 if ((addr
>> 32) != 0)
1357 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1361 if ((addr
>> 48) != 0)
1362 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1367 /* Write a SUBS instruction into *BUF.
1371 This instruction update the condition flags.
1373 RD is the destination register.
1374 RN and RM are the source registers. */
1377 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1378 struct aarch64_register rn
, struct aarch64_operand operand
)
1380 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1383 /* Write a CMP instruction into *BUF.
1387 This instruction is an alias of SUBS xzr, rn, rm.
1389 RN and RM are the registers to compare. */
1392 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1393 struct aarch64_operand operand
)
1395 return emit_subs (buf
, xzr
, rn
, operand
);
1398 /* Write a AND instruction into *BUF.
1402 RD is the destination register.
1403 RN and RM are the source registers. */
1406 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1407 struct aarch64_register rn
, struct aarch64_register rm
)
1409 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1412 /* Write a ORR instruction into *BUF.
1416 RD is the destination register.
1417 RN and RM are the source registers. */
1420 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1421 struct aarch64_register rn
, struct aarch64_register rm
)
1423 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1426 /* Write a ORN instruction into *BUF.
1430 RD is the destination register.
1431 RN and RM are the source registers. */
1434 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1435 struct aarch64_register rn
, struct aarch64_register rm
)
1437 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1440 /* Write a EOR instruction into *BUF.
1444 RD is the destination register.
1445 RN and RM are the source registers. */
1448 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1449 struct aarch64_register rn
, struct aarch64_register rm
)
1451 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1454 /* Write a MVN instruction into *BUF.
1458 This is an alias for ORN rd, xzr, rm.
1460 RD is the destination register.
1461 RM is the source register. */
1464 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1465 struct aarch64_register rm
)
1467 return emit_orn (buf
, rd
, xzr
, rm
);
1470 /* Write a LSLV instruction into *BUF.
1474 RD is the destination register.
1475 RN and RM are the source registers. */
1478 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1479 struct aarch64_register rn
, struct aarch64_register rm
)
1481 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1484 /* Write a LSRV instruction into *BUF.
1488 RD is the destination register.
1489 RN and RM are the source registers. */
1492 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1493 struct aarch64_register rn
, struct aarch64_register rm
)
1495 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1498 /* Write a ASRV instruction into *BUF.
1502 RD is the destination register.
1503 RN and RM are the source registers. */
1506 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1507 struct aarch64_register rn
, struct aarch64_register rm
)
1509 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1512 /* Write a MUL instruction into *BUF.
1516 RD is the destination register.
1517 RN and RM are the source registers. */
1520 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1521 struct aarch64_register rn
, struct aarch64_register rm
)
1523 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1526 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1530 RT is the destination register.
1531 SYSTEM_REG is special purpose register to read. */
1534 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1535 enum aarch64_system_control_registers system_reg
)
1537 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1538 | ENCODE (rt
.num
, 5, 0));
1541 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1545 SYSTEM_REG is special purpose register to write.
1546 RT is the input register. */
1549 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1550 struct aarch64_register rt
)
1552 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1553 | ENCODE (rt
.num
, 5, 0));
1556 /* Write a SEVL instruction into *BUF.
1558 This is a hint instruction telling the hardware to trigger an event. */
1561 emit_sevl (uint32_t *buf
)
1563 return aarch64_emit_insn (buf
, SEVL
);
1566 /* Write a WFE instruction into *BUF.
1568 This is a hint instruction telling the hardware to wait for an event. */
1571 emit_wfe (uint32_t *buf
)
1573 return aarch64_emit_insn (buf
, WFE
);
1576 /* Write a SBFM instruction into *BUF.
1578 SBFM rd, rn, #immr, #imms
1580 This instruction moves the bits from #immr to #imms into the
1581 destination, sign extending the result.
1583 RD is the destination register.
1584 RN is the source register.
1585 IMMR is the bit number to start at (least significant bit).
1586 IMMS is the bit number to stop at (most significant bit). */
1589 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1590 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1592 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1593 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1595 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1596 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1597 | ENCODE (rd
.num
, 5, 0));
1600 /* Write a SBFX instruction into *BUF.
1602 SBFX rd, rn, #lsb, #width
1604 This instruction moves #width bits from #lsb into the destination, sign
1605 extending the result. This is an alias for:
1607 SBFM rd, rn, #lsb, #(lsb + width - 1)
1609 RD is the destination register.
1610 RN is the source register.
1611 LSB is the bit number to start at (least significant bit).
1612 WIDTH is the number of bits to move. */
1615 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1616 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1618 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1621 /* Write a UBFM instruction into *BUF.
1623 UBFM rd, rn, #immr, #imms
1625 This instruction moves the bits from #immr to #imms into the
1626 destination, extending the result with zeros.
1628 RD is the destination register.
1629 RN is the source register.
1630 IMMR is the bit number to start at (least significant bit).
1631 IMMS is the bit number to stop at (most significant bit). */
1634 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1635 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1637 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1638 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1640 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1641 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1642 | ENCODE (rd
.num
, 5, 0));
1645 /* Write a UBFX instruction into *BUF.
1647 UBFX rd, rn, #lsb, #width
1649 This instruction moves #width bits from #lsb into the destination,
1650 extending the result with zeros. This is an alias for:
1652 UBFM rd, rn, #lsb, #(lsb + width - 1)
1654 RD is the destination register.
1655 RN is the source register.
1656 LSB is the bit number to start at (least significant bit).
1657 WIDTH is the number of bits to move. */
1660 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1661 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1663 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1666 /* Write a CSINC instruction into *BUF.
1668 CSINC rd, rn, rm, cond
1670 This instruction conditionally increments rn or rm and places the result
1671 in rd. rn is chosen is the condition is true.
1673 RD is the destination register.
1674 RN and RM are the source registers.
1675 COND is the encoded condition. */
1678 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1679 struct aarch64_register rn
, struct aarch64_register rm
,
1682 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1684 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1685 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1686 | ENCODE (rd
.num
, 5, 0));
1689 /* Write a CSET instruction into *BUF.
1693 This instruction conditionally write 1 or 0 in the destination register.
1694 1 is written if the condition is true. This is an alias for:
1696 CSINC rd, xzr, xzr, !cond
1698 Note that the condition needs to be inverted.
1700 RD is the destination register.
1701 RN and RM are the source registers.
1702 COND is the encoded condition. */
1705 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1707 /* The least significant bit of the condition needs toggling in order to
1709 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1712 /* Write LEN instructions from BUF into the inferior memory at *TO.
1714 Note instructions are always little endian on AArch64, unlike data. */
1717 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1719 size_t byte_len
= len
* sizeof (uint32_t);
1720 #if (__BYTE_ORDER == __BIG_ENDIAN)
1721 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1724 for (i
= 0; i
< len
; i
++)
1725 le_buf
[i
] = htole32 (buf
[i
]);
1727 target_write_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1731 target_write_memory (*to
, (const unsigned char *) buf
, byte_len
);
1737 /* Sub-class of struct aarch64_insn_data, store information of
1738 instruction relocation for fast tracepoint. Visitor can
1739 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1740 the relocated instructions in buffer pointed by INSN_PTR. */
1742 struct aarch64_insn_relocation_data
1744 struct aarch64_insn_data base
;
1746 /* The new address the instruction is relocated to. */
1748 /* Pointer to the buffer of relocated instruction(s). */
1752 /* Implementation of aarch64_insn_visitor method "b". */
1755 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1756 struct aarch64_insn_data
*data
)
1758 struct aarch64_insn_relocation_data
*insn_reloc
1759 = (struct aarch64_insn_relocation_data
*) data
;
1761 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1763 if (can_encode_int32 (new_offset
, 28))
1764 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1767 /* Implementation of aarch64_insn_visitor method "b_cond". */
1770 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1771 struct aarch64_insn_data
*data
)
1773 struct aarch64_insn_relocation_data
*insn_reloc
1774 = (struct aarch64_insn_relocation_data
*) data
;
1776 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1778 if (can_encode_int32 (new_offset
, 21))
1780 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1783 else if (can_encode_int32 (new_offset
, 28))
1785 /* The offset is out of range for a conditional branch
1786 instruction but not for a unconditional branch. We can use
1787 the following instructions instead:
1789 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1790 B NOT_TAKEN ; Else jump over TAKEN and continue.
1797 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1798 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1799 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1803 /* Implementation of aarch64_insn_visitor method "cb". */
1806 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1807 const unsigned rn
, int is64
,
1808 struct aarch64_insn_data
*data
)
1810 struct aarch64_insn_relocation_data
*insn_reloc
1811 = (struct aarch64_insn_relocation_data
*) data
;
1813 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1815 if (can_encode_int32 (new_offset
, 21))
1817 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1818 aarch64_register (rn
, is64
), new_offset
);
1820 else if (can_encode_int32 (new_offset
, 28))
1822 /* The offset is out of range for a compare and branch
1823 instruction but not for a unconditional branch. We can use
1824 the following instructions instead:
1826 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1827 B NOT_TAKEN ; Else jump over TAKEN and continue.
1833 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1834 aarch64_register (rn
, is64
), 8);
1835 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1836 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1840 /* Implementation of aarch64_insn_visitor method "tb". */
1843 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1844 const unsigned rt
, unsigned bit
,
1845 struct aarch64_insn_data
*data
)
1847 struct aarch64_insn_relocation_data
*insn_reloc
1848 = (struct aarch64_insn_relocation_data
*) data
;
1850 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1852 if (can_encode_int32 (new_offset
, 16))
1854 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1855 aarch64_register (rt
, 1), new_offset
);
1857 else if (can_encode_int32 (new_offset
, 28))
1859 /* The offset is out of range for a test bit and branch
1860 instruction but not for a unconditional branch. We can use
1861 the following instructions instead:
1863 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1864 B NOT_TAKEN ; Else jump over TAKEN and continue.
1870 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1871 aarch64_register (rt
, 1), 8);
1872 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1873 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1878 /* Implementation of aarch64_insn_visitor method "adr". */
1881 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1883 struct aarch64_insn_data
*data
)
1885 struct aarch64_insn_relocation_data
*insn_reloc
1886 = (struct aarch64_insn_relocation_data
*) data
;
1887 /* We know exactly the address the ADR{P,} instruction will compute.
1888 We can just write it to the destination register. */
1889 CORE_ADDR address
= data
->insn_addr
+ offset
;
1893 /* Clear the lower 12 bits of the offset to get the 4K page. */
1894 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1895 aarch64_register (rd
, 1),
1899 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1900 aarch64_register (rd
, 1), address
);
1903 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1906 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1907 const unsigned rt
, const int is64
,
1908 struct aarch64_insn_data
*data
)
1910 struct aarch64_insn_relocation_data
*insn_reloc
1911 = (struct aarch64_insn_relocation_data
*) data
;
1912 CORE_ADDR address
= data
->insn_addr
+ offset
;
1914 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1915 aarch64_register (rt
, 1), address
);
1917 /* We know exactly what address to load from, and what register we
1920 MOV xd, #(oldloc + offset)
1921 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1924 LDR xd, [xd] ; or LDRSW xd, [xd]
1929 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1930 aarch64_register (rt
, 1),
1931 aarch64_register (rt
, 1),
1932 offset_memory_operand (0));
1934 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1935 aarch64_register (rt
, is64
),
1936 aarch64_register (rt
, 1),
1937 offset_memory_operand (0));
1940 /* Implementation of aarch64_insn_visitor method "others". */
1943 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1944 struct aarch64_insn_data
*data
)
1946 struct aarch64_insn_relocation_data
*insn_reloc
1947 = (struct aarch64_insn_relocation_data
*) data
;
1949 /* The instruction is not PC relative. Just re-emit it at the new
1951 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1954 static const struct aarch64_insn_visitor visitor
=
1956 aarch64_ftrace_insn_reloc_b
,
1957 aarch64_ftrace_insn_reloc_b_cond
,
1958 aarch64_ftrace_insn_reloc_cb
,
1959 aarch64_ftrace_insn_reloc_tb
,
1960 aarch64_ftrace_insn_reloc_adr
,
1961 aarch64_ftrace_insn_reloc_ldr_literal
,
1962 aarch64_ftrace_insn_reloc_others
,
1965 /* Implementation of linux_target_ops method
1966 "install_fast_tracepoint_jump_pad". */
1969 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1971 CORE_ADDR collector
,
1974 CORE_ADDR
*jump_entry
,
1975 CORE_ADDR
*trampoline
,
1976 ULONGEST
*trampoline_size
,
1977 unsigned char *jjump_pad_insn
,
1978 ULONGEST
*jjump_pad_insn_size
,
1979 CORE_ADDR
*adjusted_insn_addr
,
1980 CORE_ADDR
*adjusted_insn_addr_end
,
1988 CORE_ADDR buildaddr
= *jump_entry
;
1989 struct aarch64_insn_relocation_data insn_data
;
1991 /* We need to save the current state on the stack both to restore it
1992 later and to collect register values when the tracepoint is hit.
1994 The saved registers are pushed in a layout that needs to be in sync
1995 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1996 the supply_fast_tracepoint_registers function will fill in the
1997 register cache from a pointer to saved registers on the stack we build
2000 For simplicity, we set the size of each cell on the stack to 16 bytes.
2001 This way one cell can hold any register type, from system registers
2002 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
2003 has to be 16 bytes aligned anyway.
2005 Note that the CPSR register does not exist on AArch64. Instead we
2006 can access system bits describing the process state with the
2007 MRS/MSR instructions, namely the condition flags. We save them as
2008 if they are part of a CPSR register because that's how GDB
2009 interprets these system bits. At the moment, only the condition
2010 flags are saved in CPSR (NZCV).
2012 Stack layout, each cell is 16 bytes (descending):
2014 High *-------- SIMD&FP registers from 31 down to 0. --------*
2020 *---- General purpose registers from 30 down to 0. ----*
2026 *------------- Special purpose registers. -------------*
2029 | CPSR (NZCV) | 5 cells
2032 *------------- collecting_t object --------------------*
2033 | TPIDR_EL0 | struct tracepoint * |
2034 Low *------------------------------------------------------*
2036 After this stack is set up, we issue a call to the collector, passing
2037 it the saved registers at (SP + 16). */
2039 /* Push SIMD&FP registers on the stack:
2041 SUB sp, sp, #(32 * 16)
2043 STP q30, q31, [sp, #(30 * 16)]
2048 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
2049 for (i
= 30; i
>= 0; i
-= 2)
2050 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2052 /* Push general purpose registers on the stack. Note that we do not need
2053 to push x31 as it represents the xzr register and not the stack
2054 pointer in a STR instruction.
2056 SUB sp, sp, #(31 * 16)
2058 STR x30, [sp, #(30 * 16)]
2063 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
2064 for (i
= 30; i
>= 0; i
-= 1)
2065 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2066 offset_memory_operand (i
* 16));
2068 /* Make space for 5 more cells.
2070 SUB sp, sp, #(5 * 16)
2073 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2078 ADD x4, sp, #((32 + 31 + 5) * 16)
2079 STR x4, [sp, #(4 * 16)]
2082 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2083 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2085 /* Save PC (tracepoint address):
2090 STR x3, [sp, #(3 * 16)]
2094 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2095 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2097 /* Save CPSR (NZCV), FPSR and FPCR:
2103 STR x2, [sp, #(2 * 16)]
2104 STR x1, [sp, #(1 * 16)]
2105 STR x0, [sp, #(0 * 16)]
2108 p
+= emit_mrs (p
, x2
, NZCV
);
2109 p
+= emit_mrs (p
, x1
, FPSR
);
2110 p
+= emit_mrs (p
, x0
, FPCR
);
2111 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2112 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2113 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2115 /* Push the collecting_t object. It consist of the address of the
2116 tracepoint and an ID for the current thread. We get the latter by
2117 reading the tpidr_el0 system register. It corresponds to the
2118 NT_ARM_TLS register accessible with ptrace.
2125 STP x0, x1, [sp, #-16]!
2129 p
+= emit_mov_addr (p
, x0
, tpoint
);
2130 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2131 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2135 The shared memory for the lock is at lockaddr. It will hold zero
2136 if no-one is holding the lock, otherwise it contains the address of
2137 the collecting_t object on the stack of the thread which acquired it.
2139 At this stage, the stack pointer points to this thread's collecting_t
2142 We use the following registers:
2143 - x0: Address of the lock.
2144 - x1: Pointer to collecting_t object.
2145 - x2: Scratch register.
2151 ; Trigger an event local to this core. So the following WFE
2152 ; instruction is ignored.
2155 ; Wait for an event. The event is triggered by either the SEVL
2156 ; or STLR instructions (store release).
2159 ; Atomically read at lockaddr. This marks the memory location as
2160 ; exclusive. This instruction also has memory constraints which
2161 ; make sure all previous data reads and writes are done before
2165 ; Try again if another thread holds the lock.
2168 ; We can lock it! Write the address of the collecting_t object.
2169 ; This instruction will fail if the memory location is not marked
2170 ; as exclusive anymore. If it succeeds, it will remove the
2171 ; exclusive mark on the memory location. This way, if another
2172 ; thread executes this instruction before us, we will fail and try
2179 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2180 p
+= emit_mov (p
, x1
, register_operand (sp
));
2184 p
+= emit_ldaxr (p
, x2
, x0
);
2185 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2186 p
+= emit_stxr (p
, w2
, x1
, x0
);
2187 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2189 /* Call collector (struct tracepoint *, unsigned char *):
2194 ; Saved registers start after the collecting_t object.
2197 ; We use an intra-procedure-call scratch register.
2198 MOV ip0, #(collector)
2201 ; And call back to C!
2206 p
+= emit_mov_addr (p
, x0
, tpoint
);
2207 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2209 p
+= emit_mov_addr (p
, ip0
, collector
);
2210 p
+= emit_blr (p
, ip0
);
2212 /* Release the lock.
2217 ; This instruction is a normal store with memory ordering
2218 ; constraints. Thanks to this we do not have to put a data
2219 ; barrier instruction to make sure all data read and writes are done
2220 ; before this instruction is executed. Furthermore, this instruction
2221 ; will trigger an event, letting other threads know they can grab
2226 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2227 p
+= emit_stlr (p
, xzr
, x0
);
2229 /* Free collecting_t object:
2234 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2236 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2237 registers from the stack.
2239 LDR x2, [sp, #(2 * 16)]
2240 LDR x1, [sp, #(1 * 16)]
2241 LDR x0, [sp, #(0 * 16)]
2247 ADD sp, sp #(5 * 16)
2250 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2251 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2252 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2253 p
+= emit_msr (p
, NZCV
, x2
);
2254 p
+= emit_msr (p
, FPSR
, x1
);
2255 p
+= emit_msr (p
, FPCR
, x0
);
2257 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2259 /* Pop general purpose registers:
2263 LDR x30, [sp, #(30 * 16)]
2265 ADD sp, sp, #(31 * 16)
2268 for (i
= 0; i
<= 30; i
+= 1)
2269 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2270 offset_memory_operand (i
* 16));
2271 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2273 /* Pop SIMD&FP registers:
2277 LDP q30, q31, [sp, #(30 * 16)]
2279 ADD sp, sp, #(32 * 16)
2282 for (i
= 0; i
<= 30; i
+= 2)
2283 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2284 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2286 /* Write the code into the inferior memory. */
2287 append_insns (&buildaddr
, p
- buf
, buf
);
2289 /* Now emit the relocated instruction. */
2290 *adjusted_insn_addr
= buildaddr
;
2291 target_read_uint32 (tpaddr
, &insn
);
2293 insn_data
.base
.insn_addr
= tpaddr
;
2294 insn_data
.new_addr
= buildaddr
;
2295 insn_data
.insn_ptr
= buf
;
2297 aarch64_relocate_instruction (insn
, &visitor
,
2298 (struct aarch64_insn_data
*) &insn_data
);
2300 /* We may not have been able to relocate the instruction. */
2301 if (insn_data
.insn_ptr
== buf
)
2304 "E.Could not relocate instruction from %s to %s.",
2305 core_addr_to_string_nz (tpaddr
),
2306 core_addr_to_string_nz (buildaddr
));
2310 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2311 *adjusted_insn_addr_end
= buildaddr
;
2313 /* Go back to the start of the buffer. */
2316 /* Emit a branch back from the jump pad. */
2317 offset
= (tpaddr
+ orig_size
- buildaddr
);
2318 if (!can_encode_int32 (offset
, 28))
2321 "E.Jump back from jump pad too far from tracepoint "
2322 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2327 p
+= emit_b (p
, 0, offset
);
2328 append_insns (&buildaddr
, p
- buf
, buf
);
2330 /* Give the caller a branch instruction into the jump pad. */
2331 offset
= (*jump_entry
- tpaddr
);
2332 if (!can_encode_int32 (offset
, 28))
2335 "E.Jump pad too far from tracepoint "
2336 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2341 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2342 *jjump_pad_insn_size
= 4;
2344 /* Return the end address of our pad. */
2345 *jump_entry
= buildaddr
;
2350 /* Helper function writing LEN instructions from START into
2351 current_insn_ptr. */
2354 emit_ops_insns (const uint32_t *start
, int len
)
2356 CORE_ADDR buildaddr
= current_insn_ptr
;
2359 debug_printf ("Adding %d instrucions at %s\n",
2360 len
, paddress (buildaddr
));
2362 append_insns (&buildaddr
, len
, start
);
2363 current_insn_ptr
= buildaddr
;
2366 /* Pop a register from the stack. */
2369 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2371 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2374 /* Push a register on the stack. */
2377 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2379 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2382 /* Implementation of emit_ops method "emit_prologue". */
2385 aarch64_emit_prologue (void)
2390 /* This function emit a prologue for the following function prototype:
2392 enum eval_result_type f (unsigned char *regs,
2395 The first argument is a buffer of raw registers. The second
2396 argument is the result of
2397 evaluating the expression, which will be set to whatever is on top of
2398 the stack at the end.
2400 The stack set up by the prologue is as such:
2402 High *------------------------------------------------------*
2405 | x1 (ULONGEST *value) |
2406 | x0 (unsigned char *regs) |
2407 Low *------------------------------------------------------*
2409 As we are implementing a stack machine, each opcode can expand the
2410 stack so we never know how far we are from the data saved by this
2411 prologue. In order to be able refer to value and regs later, we save
2412 the current stack pointer in the frame pointer. This way, it is not
2413 clobbered when calling C functions.
2415 Finally, throughout every operation, we are using register x0 as the
2416 top of the stack, and x1 as a scratch register. */
2418 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2419 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2420 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2422 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2425 emit_ops_insns (buf
, p
- buf
);
2428 /* Implementation of emit_ops method "emit_epilogue". */
2431 aarch64_emit_epilogue (void)
2436 /* Store the result of the expression (x0) in *value. */
2437 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2438 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2439 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2441 /* Restore the previous state. */
2442 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2443 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2445 /* Return expr_eval_no_error. */
2446 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2447 p
+= emit_ret (p
, lr
);
2449 emit_ops_insns (buf
, p
- buf
);
2452 /* Implementation of emit_ops method "emit_add". */
2455 aarch64_emit_add (void)
2460 p
+= emit_pop (p
, x1
);
2461 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2463 emit_ops_insns (buf
, p
- buf
);
2466 /* Implementation of emit_ops method "emit_sub". */
2469 aarch64_emit_sub (void)
2474 p
+= emit_pop (p
, x1
);
2475 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2477 emit_ops_insns (buf
, p
- buf
);
2480 /* Implementation of emit_ops method "emit_mul". */
2483 aarch64_emit_mul (void)
2488 p
+= emit_pop (p
, x1
);
2489 p
+= emit_mul (p
, x0
, x1
, x0
);
2491 emit_ops_insns (buf
, p
- buf
);
2494 /* Implementation of emit_ops method "emit_lsh". */
2497 aarch64_emit_lsh (void)
2502 p
+= emit_pop (p
, x1
);
2503 p
+= emit_lslv (p
, x0
, x1
, x0
);
2505 emit_ops_insns (buf
, p
- buf
);
2508 /* Implementation of emit_ops method "emit_rsh_signed". */
2511 aarch64_emit_rsh_signed (void)
2516 p
+= emit_pop (p
, x1
);
2517 p
+= emit_asrv (p
, x0
, x1
, x0
);
2519 emit_ops_insns (buf
, p
- buf
);
2522 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2525 aarch64_emit_rsh_unsigned (void)
2530 p
+= emit_pop (p
, x1
);
2531 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2533 emit_ops_insns (buf
, p
- buf
);
2536 /* Implementation of emit_ops method "emit_ext". */
2539 aarch64_emit_ext (int arg
)
2544 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2546 emit_ops_insns (buf
, p
- buf
);
2549 /* Implementation of emit_ops method "emit_log_not". */
2552 aarch64_emit_log_not (void)
2557 /* If the top of the stack is 0, replace it with 1. Else replace it with
2560 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2561 p
+= emit_cset (p
, x0
, EQ
);
2563 emit_ops_insns (buf
, p
- buf
);
2566 /* Implementation of emit_ops method "emit_bit_and". */
2569 aarch64_emit_bit_and (void)
2574 p
+= emit_pop (p
, x1
);
2575 p
+= emit_and (p
, x0
, x0
, x1
);
2577 emit_ops_insns (buf
, p
- buf
);
2580 /* Implementation of emit_ops method "emit_bit_or". */
2583 aarch64_emit_bit_or (void)
2588 p
+= emit_pop (p
, x1
);
2589 p
+= emit_orr (p
, x0
, x0
, x1
);
2591 emit_ops_insns (buf
, p
- buf
);
2594 /* Implementation of emit_ops method "emit_bit_xor". */
2597 aarch64_emit_bit_xor (void)
2602 p
+= emit_pop (p
, x1
);
2603 p
+= emit_eor (p
, x0
, x0
, x1
);
2605 emit_ops_insns (buf
, p
- buf
);
2608 /* Implementation of emit_ops method "emit_bit_not". */
2611 aarch64_emit_bit_not (void)
2616 p
+= emit_mvn (p
, x0
, x0
);
2618 emit_ops_insns (buf
, p
- buf
);
2621 /* Implementation of emit_ops method "emit_equal". */
2624 aarch64_emit_equal (void)
2629 p
+= emit_pop (p
, x1
);
2630 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2631 p
+= emit_cset (p
, x0
, EQ
);
2633 emit_ops_insns (buf
, p
- buf
);
2636 /* Implementation of emit_ops method "emit_less_signed". */
2639 aarch64_emit_less_signed (void)
2644 p
+= emit_pop (p
, x1
);
2645 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2646 p
+= emit_cset (p
, x0
, LT
);
2648 emit_ops_insns (buf
, p
- buf
);
2651 /* Implementation of emit_ops method "emit_less_unsigned". */
2654 aarch64_emit_less_unsigned (void)
2659 p
+= emit_pop (p
, x1
);
2660 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2661 p
+= emit_cset (p
, x0
, LO
);
2663 emit_ops_insns (buf
, p
- buf
);
2666 /* Implementation of emit_ops method "emit_ref". */
2669 aarch64_emit_ref (int size
)
2677 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2680 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2683 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2686 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2689 /* Unknown size, bail on compilation. */
2694 emit_ops_insns (buf
, p
- buf
);
2697 /* Implementation of emit_ops method "emit_if_goto". */
2700 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2705 /* The Z flag is set or cleared here. */
2706 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2707 /* This instruction must not change the Z flag. */
2708 p
+= emit_pop (p
, x0
);
2709 /* Branch over the next instruction if x0 == 0. */
2710 p
+= emit_bcond (p
, EQ
, 8);
2712 /* The NOP instruction will be patched with an unconditional branch. */
2714 *offset_p
= (p
- buf
) * 4;
2719 emit_ops_insns (buf
, p
- buf
);
2722 /* Implementation of emit_ops method "emit_goto". */
2725 aarch64_emit_goto (int *offset_p
, int *size_p
)
2730 /* The NOP instruction will be patched with an unconditional branch. */
2737 emit_ops_insns (buf
, p
- buf
);
2740 /* Implementation of emit_ops method "write_goto_address". */
2743 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2747 emit_b (&insn
, 0, to
- from
);
2748 append_insns (&from
, 1, &insn
);
2751 /* Implementation of emit_ops method "emit_const". */
2754 aarch64_emit_const (LONGEST num
)
2759 p
+= emit_mov_addr (p
, x0
, num
);
2761 emit_ops_insns (buf
, p
- buf
);
2764 /* Implementation of emit_ops method "emit_call". */
2767 aarch64_emit_call (CORE_ADDR fn
)
2772 p
+= emit_mov_addr (p
, ip0
, fn
);
2773 p
+= emit_blr (p
, ip0
);
2775 emit_ops_insns (buf
, p
- buf
);
2778 /* Implementation of emit_ops method "emit_reg". */
2781 aarch64_emit_reg (int reg
)
2786 /* Set x0 to unsigned char *regs. */
2787 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2788 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2789 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2791 emit_ops_insns (buf
, p
- buf
);
2793 aarch64_emit_call (get_raw_reg_func_addr ());
2796 /* Implementation of emit_ops method "emit_pop". */
2799 aarch64_emit_pop (void)
2804 p
+= emit_pop (p
, x0
);
2806 emit_ops_insns (buf
, p
- buf
);
2809 /* Implementation of emit_ops method "emit_stack_flush". */
2812 aarch64_emit_stack_flush (void)
2817 p
+= emit_push (p
, x0
);
2819 emit_ops_insns (buf
, p
- buf
);
2822 /* Implementation of emit_ops method "emit_zero_ext". */
2825 aarch64_emit_zero_ext (int arg
)
2830 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2832 emit_ops_insns (buf
, p
- buf
);
2835 /* Implementation of emit_ops method "emit_swap". */
2838 aarch64_emit_swap (void)
2843 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2844 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2845 p
+= emit_mov (p
, x0
, register_operand (x1
));
2847 emit_ops_insns (buf
, p
- buf
);
2850 /* Implementation of emit_ops method "emit_stack_adjust". */
2853 aarch64_emit_stack_adjust (int n
)
2855 /* This is not needed with our design. */
2859 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2861 emit_ops_insns (buf
, p
- buf
);
2864 /* Implementation of emit_ops method "emit_int_call_1". */
2867 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2872 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2874 emit_ops_insns (buf
, p
- buf
);
2876 aarch64_emit_call (fn
);
2879 /* Implementation of emit_ops method "emit_void_call_2". */
2882 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2887 /* Push x0 on the stack. */
2888 aarch64_emit_stack_flush ();
2890 /* Setup arguments for the function call:
2893 x1: top of the stack
2898 p
+= emit_mov (p
, x1
, register_operand (x0
));
2899 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2901 emit_ops_insns (buf
, p
- buf
);
2903 aarch64_emit_call (fn
);
2906 aarch64_emit_pop ();
2909 /* Implementation of emit_ops method "emit_eq_goto". */
2912 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2917 p
+= emit_pop (p
, x1
);
2918 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2919 /* Branch over the next instruction if x0 != x1. */
2920 p
+= emit_bcond (p
, NE
, 8);
2921 /* The NOP instruction will be patched with an unconditional branch. */
2923 *offset_p
= (p
- buf
) * 4;
2928 emit_ops_insns (buf
, p
- buf
);
2931 /* Implementation of emit_ops method "emit_ne_goto". */
2934 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2939 p
+= emit_pop (p
, x1
);
2940 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2941 /* Branch over the next instruction if x0 == x1. */
2942 p
+= emit_bcond (p
, EQ
, 8);
2943 /* The NOP instruction will be patched with an unconditional branch. */
2945 *offset_p
= (p
- buf
) * 4;
2950 emit_ops_insns (buf
, p
- buf
);
2953 /* Implementation of emit_ops method "emit_lt_goto". */
2956 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2961 p
+= emit_pop (p
, x1
);
2962 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2963 /* Branch over the next instruction if x0 >= x1. */
2964 p
+= emit_bcond (p
, GE
, 8);
2965 /* The NOP instruction will be patched with an unconditional branch. */
2967 *offset_p
= (p
- buf
) * 4;
2972 emit_ops_insns (buf
, p
- buf
);
2975 /* Implementation of emit_ops method "emit_le_goto". */
2978 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2983 p
+= emit_pop (p
, x1
);
2984 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2985 /* Branch over the next instruction if x0 > x1. */
2986 p
+= emit_bcond (p
, GT
, 8);
2987 /* The NOP instruction will be patched with an unconditional branch. */
2989 *offset_p
= (p
- buf
) * 4;
2994 emit_ops_insns (buf
, p
- buf
);
2997 /* Implementation of emit_ops method "emit_gt_goto". */
3000 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
3005 p
+= emit_pop (p
, x1
);
3006 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3007 /* Branch over the next instruction if x0 <= x1. */
3008 p
+= emit_bcond (p
, LE
, 8);
3009 /* The NOP instruction will be patched with an unconditional branch. */
3011 *offset_p
= (p
- buf
) * 4;
3016 emit_ops_insns (buf
, p
- buf
);
3019 /* Implementation of emit_ops method "emit_ge_got". */
3022 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
3027 p
+= emit_pop (p
, x1
);
3028 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3029 /* Branch over the next instruction if x0 <= x1. */
3030 p
+= emit_bcond (p
, LT
, 8);
3031 /* The NOP instruction will be patched with an unconditional branch. */
3033 *offset_p
= (p
- buf
) * 4;
3038 emit_ops_insns (buf
, p
- buf
);
3041 static struct emit_ops aarch64_emit_ops_impl
=
3043 aarch64_emit_prologue
,
3044 aarch64_emit_epilogue
,
3049 aarch64_emit_rsh_signed
,
3050 aarch64_emit_rsh_unsigned
,
3052 aarch64_emit_log_not
,
3053 aarch64_emit_bit_and
,
3054 aarch64_emit_bit_or
,
3055 aarch64_emit_bit_xor
,
3056 aarch64_emit_bit_not
,
3058 aarch64_emit_less_signed
,
3059 aarch64_emit_less_unsigned
,
3061 aarch64_emit_if_goto
,
3063 aarch64_write_goto_address
,
3068 aarch64_emit_stack_flush
,
3069 aarch64_emit_zero_ext
,
3071 aarch64_emit_stack_adjust
,
3072 aarch64_emit_int_call_1
,
3073 aarch64_emit_void_call_2
,
3074 aarch64_emit_eq_goto
,
3075 aarch64_emit_ne_goto
,
3076 aarch64_emit_lt_goto
,
3077 aarch64_emit_le_goto
,
3078 aarch64_emit_gt_goto
,
3079 aarch64_emit_ge_got
,
3082 /* Implementation of linux_target_ops method "emit_ops". */
3084 static struct emit_ops
*
3085 aarch64_emit_ops (void)
3087 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* All AArch64 instructions are 4 bytes, so a single instruction can
     always be replaced by a branch to the jump pad.  */
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3107 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3110 aarch64_target::sw_breakpoint_from_kind (int kind
, int *size
)
3112 if (is_64bit_tdesc ())
3114 *size
= aarch64_breakpoint_len
;
3115 return aarch64_breakpoint
;
3118 return arm_sw_breakpoint_from_kind (kind
, size
);
3121 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3124 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3126 if (is_64bit_tdesc ())
3127 return aarch64_breakpoint_len
;
3129 return arm_breakpoint_kind_from_pc (pcptr
);
3132 /* Implementation of the target ops method
3133 "breakpoint_kind_from_current_state". */
3136 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3138 if (is_64bit_tdesc ())
3139 return aarch64_breakpoint_len
;
3141 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3152 struct linux_target_ops the_low_target
=
3154 aarch64_install_fast_tracepoint_jump_pad
,
3156 aarch64_get_min_fast_tracepoint_insn_len
,
3157 aarch64_supports_range_stepping
,
3158 aarch64_supports_hardware_single_step
,
3159 aarch64_get_syscall_trapinfo
,
3162 /* The linux target ops object. */
3164 linux_process_target
*the_linux_target
= &the_aarch64_target
;
3167 initialize_low_arch (void)
3169 initialize_low_arch_aarch32 ();
3171 initialize_regsets_info (&aarch64_regsets_info
);
3172 initialize_regsets_info (&aarch64_sve_regsets_info
);