1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
60 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
62 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
64 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
68 void low_arch_setup () override
;
70 bool low_cannot_fetch_register (int regno
) override
;
72 bool low_cannot_store_register (int regno
) override
;
74 bool low_supports_breakpoints () override
;
76 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
78 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
80 bool low_breakpoint_at (CORE_ADDR pc
) override
;
83 /* The singleton target ops object. */
85 static aarch64_target the_aarch64_target
;
88 aarch64_target::low_cannot_fetch_register (int regno
)
90 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
91 "is not implemented by the target");
95 aarch64_target::low_cannot_store_register (int regno
)
97 gdb_assert_not_reached ("linux target op low_cannot_store_register "
98 "is not implemented by the target");
101 /* Per-process arch-specific data we want to keep. */
103 struct arch_process_info
105 /* Hardware breakpoint/watchpoint data.
106 The reason for them to be per-process rather than per-thread is
107 due to the lack of information in the gdbserver environment;
108 gdbserver is not told that whether a requested hardware
109 breakpoint/watchpoint is thread specific or not, so it has to set
110 each hw bp/wp for every thread in the current process. The
111 higher level bp/wp management in gdb will resume a thread if a hw
112 bp/wp trap is not expected for it. Since the hw bp/wp setting is
113 same for each thread, it is reasonable for the data to live here.
115 struct aarch64_debug_reg_state debug_reg_state
;
118 /* Return true if the size of register 0 is 8 byte. */
121 is_64bit_tdesc (void)
123 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
125 return register_size (regcache
->tdesc
, 0) == 8;
128 /* Return true if the regcache contains the number of SVE registers. */
133 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
135 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
139 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
141 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
144 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
145 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
146 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
147 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
148 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
152 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
154 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
157 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
158 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
159 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
160 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
161 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
165 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
167 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
170 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
171 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
172 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
173 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
177 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
179 const struct user_fpsimd_state
*regset
180 = (const struct user_fpsimd_state
*) buf
;
183 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
184 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
185 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
186 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
189 /* Store the pauth registers to regcache. */
192 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
194 uint64_t *pauth_regset
= (uint64_t *) buf
;
195 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
200 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
202 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
207 aarch64_target::low_supports_breakpoints ()
212 /* Implementation of linux target ops method "low_get_pc". */
215 aarch64_target::low_get_pc (regcache
*regcache
)
217 if (register_size (regcache
->tdesc
, 0) == 8)
218 return linux_get_pc_64bit (regcache
);
220 return linux_get_pc_32bit (regcache
);
223 /* Implementation of linux target ops method "low_set_pc". */
226 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
228 if (register_size (regcache
->tdesc
, 0) == 8)
229 linux_set_pc_64bit (regcache
, pc
);
231 linux_set_pc_32bit (regcache
, pc
);
234 #define aarch64_breakpoint_len 4
236 /* AArch64 BRK software debug mode instruction.
237 This instruction needs to match gdb/aarch64-tdep.c
238 (aarch64_default_breakpoint). */
239 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
241 /* Implementation of linux target ops method "low_breakpoint_at". */
244 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
246 if (is_64bit_tdesc ())
248 gdb_byte insn
[aarch64_breakpoint_len
];
250 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
251 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
257 return arm_breakpoint_at (where
);
261 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
265 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
267 state
->dr_addr_bp
[i
] = 0;
268 state
->dr_ctrl_bp
[i
] = 0;
269 state
->dr_ref_count_bp
[i
] = 0;
272 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
274 state
->dr_addr_wp
[i
] = 0;
275 state
->dr_ctrl_wp
[i
] = 0;
276 state
->dr_ref_count_wp
[i
] = 0;
280 /* Return the pointer to the debug register state structure in the
281 current process' arch-specific data area. */
283 struct aarch64_debug_reg_state
*
284 aarch64_get_debug_reg_state (pid_t pid
)
286 struct process_info
*proc
= find_process_pid (pid
);
288 return &proc
->priv
->arch_private
->debug_reg_state
;
291 /* Implementation of linux_target_ops method "supports_z_point_type". */
294 aarch64_supports_z_point_type (char z_type
)
300 case Z_PACKET_WRITE_WP
:
301 case Z_PACKET_READ_WP
:
302 case Z_PACKET_ACCESS_WP
:
309 /* Implementation of linux_target_ops method "insert_point".
311 It actually only records the info of the to-be-inserted bp/wp;
312 the actual insertion will happen when threads are resumed. */
315 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
316 int len
, struct raw_breakpoint
*bp
)
319 enum target_hw_bp_type targ_type
;
320 struct aarch64_debug_reg_state
*state
321 = aarch64_get_debug_reg_state (pid_of (current_thread
));
324 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
325 (unsigned long) addr
, len
);
327 /* Determine the type from the raw breakpoint type. */
328 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
330 if (targ_type
!= hw_execute
)
332 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
333 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
334 1 /* is_insert */, state
);
342 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
343 instruction. Set it to 2 to correctly encode length bit
344 mask in hardware/watchpoint control register. */
347 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
348 1 /* is_insert */, state
);
352 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
358 /* Implementation of linux_target_ops method "remove_point".
360 It actually only records the info of the to-be-removed bp/wp,
361 the actual removal will be done when threads are resumed. */
364 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
365 int len
, struct raw_breakpoint
*bp
)
368 enum target_hw_bp_type targ_type
;
369 struct aarch64_debug_reg_state
*state
370 = aarch64_get_debug_reg_state (pid_of (current_thread
));
373 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
374 (unsigned long) addr
, len
);
376 /* Determine the type from the raw breakpoint type. */
377 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
379 /* Set up state pointers. */
380 if (targ_type
!= hw_execute
)
382 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
388 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
389 instruction. Set it to 2 to correctly encode length bit
390 mask in hardware/watchpoint control register. */
393 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
394 0 /* is_insert */, state
);
398 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
404 /* Implementation of linux_target_ops method "stopped_data_address". */
407 aarch64_stopped_data_address (void)
411 struct aarch64_debug_reg_state
*state
;
413 pid
= lwpid_of (current_thread
);
415 /* Get the siginfo. */
416 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
417 return (CORE_ADDR
) 0;
419 /* Need to be a hardware breakpoint/watchpoint trap. */
420 if (siginfo
.si_signo
!= SIGTRAP
421 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
422 return (CORE_ADDR
) 0;
424 /* Check if the address matches any watched address. */
425 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
426 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
428 const unsigned int offset
429 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
430 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
431 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
432 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
433 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
434 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
436 if (state
->dr_ref_count_wp
[i
]
437 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
438 && addr_trap
>= addr_watch_aligned
439 && addr_trap
< addr_watch
+ len
)
441 /* ADDR_TRAP reports the first address of the memory range
442 accessed by the CPU, regardless of what was the memory
443 range watched. Thus, a large CPU access that straddles
444 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
445 ADDR_TRAP that is lower than the
446 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
448 addr: | 4 | 5 | 6 | 7 | 8 |
449 |---- range watched ----|
450 |----------- range accessed ------------|
452 In this case, ADDR_TRAP will be 4.
454 To match a watchpoint known to GDB core, we must never
455 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
456 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
457 positive on kernels older than 4.10. See PR
463 return (CORE_ADDR
) 0;
466 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
469 aarch64_stopped_by_watchpoint (void)
471 if (aarch64_stopped_data_address () != 0)
477 /* Fetch the thread-local storage pointer for libthread_db. */
480 ps_get_thread_area (struct ps_prochandle
*ph
,
481 lwpid_t lwpid
, int idx
, void **base
)
483 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
487 /* Implementation of linux_target_ops method "siginfo_fixup". */
490 aarch64_linux_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
492 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
493 if (!is_64bit_tdesc ())
496 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
499 aarch64_siginfo_from_compat_siginfo (native
,
500 (struct compat_siginfo
*) inf
);
508 /* Implementation of linux_target_ops method "new_process". */
510 static struct arch_process_info
*
511 aarch64_linux_new_process (void)
513 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
515 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
520 /* Implementation of linux_target_ops method "delete_process". */
523 aarch64_linux_delete_process (struct arch_process_info
*info
)
528 /* Implementation of linux_target_ops method "linux_new_fork". */
531 aarch64_linux_new_fork (struct process_info
*parent
,
532 struct process_info
*child
)
534 /* These are allocated by linux_add_process. */
535 gdb_assert (parent
->priv
!= NULL
536 && parent
->priv
->arch_private
!= NULL
);
537 gdb_assert (child
->priv
!= NULL
538 && child
->priv
->arch_private
!= NULL
);
540 /* Linux kernel before 2.6.33 commit
541 72f674d203cd230426437cdcf7dd6f681dad8b0d
542 will inherit hardware debug registers from parent
543 on fork/vfork/clone. Newer Linux kernels create such tasks with
544 zeroed debug registers.
546 GDB core assumes the child inherits the watchpoints/hw
547 breakpoints of the parent, and will remove them all from the
548 forked off process. Copy the debug registers mirrors into the
549 new process so that all breakpoints and watchpoints can be
550 removed together. The debug registers mirror will become zeroed
551 in the end before detaching the forked off process, thus making
552 this compatible with older Linux kernels too. */
554 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
557 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
558 #define AARCH64_HWCAP_PACA (1 << 30)
560 /* Implementation of linux target ops method "low_arch_setup". */
563 aarch64_target::low_arch_setup ()
565 unsigned int machine
;
569 tid
= lwpid_of (current_thread
);
571 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
575 uint64_t vq
= aarch64_sve_get_vq (tid
);
576 unsigned long hwcap
= linux_get_hwcap (8);
577 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
579 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
582 current_process ()->tdesc
= aarch32_linux_read_description ();
584 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
587 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
590 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
592 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
595 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
598 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
600 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
603 static struct regset_info aarch64_regsets
[] =
605 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
606 sizeof (struct user_pt_regs
), GENERAL_REGS
,
607 aarch64_fill_gregset
, aarch64_store_gregset
},
608 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
609 sizeof (struct user_fpsimd_state
), FP_REGS
,
610 aarch64_fill_fpregset
, aarch64_store_fpregset
612 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
613 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
614 NULL
, aarch64_store_pauthregset
},
618 static struct regsets_info aarch64_regsets_info
=
620 aarch64_regsets
, /* regsets */
622 NULL
, /* disabled_regsets */
625 static struct regs_info regs_info_aarch64
=
627 NULL
, /* regset_bitmap */
629 &aarch64_regsets_info
,
632 static struct regset_info aarch64_sve_regsets
[] =
634 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
635 sizeof (struct user_pt_regs
), GENERAL_REGS
,
636 aarch64_fill_gregset
, aarch64_store_gregset
},
637 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
638 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
639 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
641 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
642 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
643 NULL
, aarch64_store_pauthregset
},
647 static struct regsets_info aarch64_sve_regsets_info
=
649 aarch64_sve_regsets
, /* regsets. */
650 0, /* num_regsets. */
651 NULL
, /* disabled_regsets. */
654 static struct regs_info regs_info_aarch64_sve
=
656 NULL
, /* regset_bitmap. */
658 &aarch64_sve_regsets_info
,
661 /* Implementation of linux target ops method "get_regs_info". */
664 aarch64_target::get_regs_info ()
666 if (!is_64bit_tdesc ())
667 return ®s_info_aarch32
;
670 return ®s_info_aarch64_sve
;
672 return ®s_info_aarch64
;
675 /* Implementation of linux_target_ops method "supports_tracepoints". */
678 aarch64_supports_tracepoints (void)
680 if (current_thread
== NULL
)
684 /* We don't support tracepoints on aarch32 now. */
685 return is_64bit_tdesc ();
689 /* Implementation of linux_target_ops method "get_thread_area". */
692 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
697 iovec
.iov_base
= ®
;
698 iovec
.iov_len
= sizeof (reg
);
700 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
708 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
711 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
713 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
719 collect_register_by_name (regcache
, "x8", &l_sysno
);
720 *sysno
= (int) l_sysno
;
723 collect_register_by_name (regcache
, "r7", sysno
);
726 /* List of condition codes that we need. */
728 enum aarch64_condition_codes
739 enum aarch64_operand_type
745 /* Representation of an operand. At this time, it only supports register
746 and immediate types. */
748 struct aarch64_operand
750 /* Type of the operand. */
751 enum aarch64_operand_type type
;
753 /* Value of the operand according to the type. */
757 struct aarch64_register reg
;
761 /* List of registers that we are currently using, we can add more here as
762 we need to use them. */
764 /* General purpose scratch registers (64 bit). */
765 static const struct aarch64_register x0
= { 0, 1 };
766 static const struct aarch64_register x1
= { 1, 1 };
767 static const struct aarch64_register x2
= { 2, 1 };
768 static const struct aarch64_register x3
= { 3, 1 };
769 static const struct aarch64_register x4
= { 4, 1 };
771 /* General purpose scratch registers (32 bit). */
772 static const struct aarch64_register w0
= { 0, 0 };
773 static const struct aarch64_register w2
= { 2, 0 };
775 /* Intra-procedure scratch registers. */
776 static const struct aarch64_register ip0
= { 16, 1 };
778 /* Special purpose registers. */
779 static const struct aarch64_register fp
= { 29, 1 };
780 static const struct aarch64_register lr
= { 30, 1 };
781 static const struct aarch64_register sp
= { 31, 1 };
782 static const struct aarch64_register xzr
= { 31, 1 };
784 /* Dynamically allocate a new register. If we know the register
785 statically, we should make it a global as above instead of using this
788 static struct aarch64_register
789 aarch64_register (unsigned num
, int is64
)
791 return (struct aarch64_register
) { num
, is64
};
794 /* Helper function to create a register operand, for instructions with
795 different types of operands.
798 p += emit_mov (p, x0, register_operand (x1)); */
800 static struct aarch64_operand
801 register_operand (struct aarch64_register reg
)
803 struct aarch64_operand operand
;
805 operand
.type
= OPERAND_REGISTER
;
811 /* Helper function to create an immediate operand, for instructions with
812 different types of operands.
815 p += emit_mov (p, x0, immediate_operand (12)); */
817 static struct aarch64_operand
818 immediate_operand (uint32_t imm
)
820 struct aarch64_operand operand
;
822 operand
.type
= OPERAND_IMMEDIATE
;
828 /* Helper function to create an offset memory operand.
831 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
833 static struct aarch64_memory_operand
834 offset_memory_operand (int32_t offset
)
836 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
839 /* Helper function to create a pre-index memory operand.
842 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
844 static struct aarch64_memory_operand
845 preindex_memory_operand (int32_t index
)
847 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
850 /* Helper function to create a post-index memory operand.
853 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
855 static struct aarch64_memory_operand
856 postindex_memory_operand (int32_t index
)
858 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
879 /* Write a BLR instruction into *BUF.
883 RN is the register to branch to. */
886 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
888 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
891 /* Write a RET instruction into *BUF.
895 RN is the register to branch to. */
898 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
900 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
904 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
905 struct aarch64_register rt
,
906 struct aarch64_register rt2
,
907 struct aarch64_register rn
,
908 struct aarch64_memory_operand operand
)
915 opc
= ENCODE (2, 2, 30);
917 opc
= ENCODE (0, 2, 30);
919 switch (operand
.type
)
921 case MEMORY_OPERAND_OFFSET
:
923 pre_index
= ENCODE (1, 1, 24);
924 write_back
= ENCODE (0, 1, 23);
927 case MEMORY_OPERAND_POSTINDEX
:
929 pre_index
= ENCODE (0, 1, 24);
930 write_back
= ENCODE (1, 1, 23);
933 case MEMORY_OPERAND_PREINDEX
:
935 pre_index
= ENCODE (1, 1, 24);
936 write_back
= ENCODE (1, 1, 23);
943 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
944 | ENCODE (operand
.index
>> 3, 7, 15)
945 | ENCODE (rt2
.num
, 5, 10)
946 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
949 /* Write a STP instruction into *BUF.
951 STP rt, rt2, [rn, #offset]
952 STP rt, rt2, [rn, #index]!
953 STP rt, rt2, [rn], #index
955 RT and RT2 are the registers to store.
956 RN is the base address register.
957 OFFSET is the immediate to add to the base address. It is limited to a
958 -512 .. 504 range (7 bits << 3). */
961 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
962 struct aarch64_register rt2
, struct aarch64_register rn
,
963 struct aarch64_memory_operand operand
)
965 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
968 /* Write a LDP instruction into *BUF.
970 LDP rt, rt2, [rn, #offset]
971 LDP rt, rt2, [rn, #index]!
972 LDP rt, rt2, [rn], #index
974 RT and RT2 are the registers to store.
975 RN is the base address register.
976 OFFSET is the immediate to add to the base address. It is limited to a
977 -512 .. 504 range (7 bits << 3). */
980 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
981 struct aarch64_register rt2
, struct aarch64_register rn
,
982 struct aarch64_memory_operand operand
)
984 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
987 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
989 LDP qt, qt2, [rn, #offset]
991 RT and RT2 are the Q registers to store.
992 RN is the base address register.
993 OFFSET is the immediate to add to the base address. It is limited to
994 -1024 .. 1008 range (7 bits << 4). */
997 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
998 struct aarch64_register rn
, int32_t offset
)
1000 uint32_t opc
= ENCODE (2, 2, 30);
1001 uint32_t pre_index
= ENCODE (1, 1, 24);
1003 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1004 | ENCODE (offset
>> 4, 7, 15)
1005 | ENCODE (rt2
, 5, 10)
1006 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1009 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1011 STP qt, qt2, [rn, #offset]
1013 RT and RT2 are the Q registers to store.
1014 RN is the base address register.
1015 OFFSET is the immediate to add to the base address. It is limited to
1016 -1024 .. 1008 range (7 bits << 4). */
1019 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1020 struct aarch64_register rn
, int32_t offset
)
1022 uint32_t opc
= ENCODE (2, 2, 30);
1023 uint32_t pre_index
= ENCODE (1, 1, 24);
1025 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1026 | ENCODE (offset
>> 4, 7, 15)
1027 | ENCODE (rt2
, 5, 10)
1028 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1031 /* Write a LDRH instruction into *BUF.
1033 LDRH wt, [xn, #offset]
1034 LDRH wt, [xn, #index]!
1035 LDRH wt, [xn], #index
1037 RT is the register to store.
1038 RN is the base address register.
1039 OFFSET is the immediate to add to the base address. It is limited to
1040 0 .. 32760 range (12 bits << 3). */
1043 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1044 struct aarch64_register rn
,
1045 struct aarch64_memory_operand operand
)
1047 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1050 /* Write a LDRB instruction into *BUF.
1052 LDRB wt, [xn, #offset]
1053 LDRB wt, [xn, #index]!
1054 LDRB wt, [xn], #index
1056 RT is the register to store.
1057 RN is the base address register.
1058 OFFSET is the immediate to add to the base address. It is limited to
1059 0 .. 32760 range (12 bits << 3). */
1062 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1063 struct aarch64_register rn
,
1064 struct aarch64_memory_operand operand
)
1066 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1071 /* Write a STR instruction into *BUF.
1073 STR rt, [rn, #offset]
1074 STR rt, [rn, #index]!
1075 STR rt, [rn], #index
1077 RT is the register to store.
1078 RN is the base address register.
1079 OFFSET is the immediate to add to the base address. It is limited to
1080 0 .. 32760 range (12 bits << 3). */
1083 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1084 struct aarch64_register rn
,
1085 struct aarch64_memory_operand operand
)
1087 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1090 /* Helper function emitting an exclusive load or store instruction. */
1093 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1094 enum aarch64_opcodes opcode
,
1095 struct aarch64_register rs
,
1096 struct aarch64_register rt
,
1097 struct aarch64_register rt2
,
1098 struct aarch64_register rn
)
1100 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1101 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1102 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1105 /* Write a LAXR instruction into *BUF.
1109 RT is the destination register.
1110 RN is the base address register. */
1113 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1114 struct aarch64_register rn
)
1116 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1120 /* Write a STXR instruction into *BUF.
1124 RS is the result register, it indicates if the store succeeded or not.
1125 RT is the destination register.
1126 RN is the base address register. */
1129 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1130 struct aarch64_register rt
, struct aarch64_register rn
)
1132 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1136 /* Write a STLR instruction into *BUF.
1140 RT is the register to store.
1141 RN is the base address register. */
1144 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1145 struct aarch64_register rn
)
1147 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1151 /* Helper function for data processing instructions with register sources. */
1154 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1155 struct aarch64_register rd
,
1156 struct aarch64_register rn
,
1157 struct aarch64_register rm
)
1159 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1161 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1162 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1165 /* Helper function for data processing instructions taking either a register
1169 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1170 struct aarch64_register rd
,
1171 struct aarch64_register rn
,
1172 struct aarch64_operand operand
)
1174 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1175 /* The opcode is different for register and immediate source operands. */
1176 uint32_t operand_opcode
;
1178 if (operand
.type
== OPERAND_IMMEDIATE
)
1180 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1181 operand_opcode
= ENCODE (8, 4, 25);
1183 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1184 | ENCODE (operand
.imm
, 12, 10)
1185 | ENCODE (rn
.num
, 5, 5)
1186 | ENCODE (rd
.num
, 5, 0));
1190 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1191 operand_opcode
= ENCODE (5, 4, 25);
1193 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1198 /* Write an ADD instruction into *BUF.
1203 This function handles both an immediate and register add.
1205 RD is the destination register.
1206 RN is the input register.
1207 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1208 OPERAND_REGISTER. */
1211 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1212 struct aarch64_register rn
, struct aarch64_operand operand
)
1214 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1217 /* Write a SUB instruction into *BUF.
1222 This function handles both an immediate and register sub.
1224 RD is the destination register.
1225 RN is the input register.
1226 IMM is the immediate to substract to RN. */
1229 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1230 struct aarch64_register rn
, struct aarch64_operand operand
)
1232 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    /* A register move is an ADD of zero, which keeps the SP encoding
       usable for RD.  */
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM, expressed in
   16-bit units (encoded in the hw field).  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48

   Higher MOVKs are skipped as soon as the remaining bits of ADDR are
   all zero, so the sequence is between one and four instructions long.
   Returns the number of instructions emitted.  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears to top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction update the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.  Only the condition
   flags are affected; no general-purpose destination is written.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write a AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write a ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write a ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write a EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   Logical shift left, by a variable amount held in RM.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   Logical shift right, by a variable amount held in RM.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write a ASRV instruction into *BUF.

     ASRV rd, rn, rm

   Arithmetic shift right, by a variable amount held in RM.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N bit must match the register width (sf) for a valid encoding.  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
1553 /* Write a SBFX instruction into *BUF.
1555 SBFX rd, rn, #lsb, #width
1557 This instruction moves #width bits from #lsb into the destination, sign
1558 extending the result. This is an alias for:
1560 SBFM rd, rn, #lsb, #(lsb + width - 1)
1562 RD is the destination register.
1563 RN is the source register.
1564 LSB is the bit number to start at (least significant bit).
1565 WIDTH is the number of bits to move. */
1568 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1569 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1571 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N bit must match the register width (sf) for a valid encoding.  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
1598 /* Write a UBFX instruction into *BUF.
1600 UBFX rd, rn, #lsb, #width
1602 This instruction moves #width bits from #lsb into the destination,
1603 extending the result with zeros. This is an alias for:
1605 UBFM rd, rn, #lsb, #(lsb + width - 1)
1607 RD is the destination register.
1608 RN is the source register.
1609 LSB is the bit number to start at (least significant bit).
1610 WIDTH is the number of bits to move. */
1613 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1614 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1616 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
1642 /* Write a CSET instruction into *BUF.
1646 This instruction conditionally write 1 or 0 in the destination register.
1647 1 is written if the condition is true. This is an alias for:
1649 CSINC rd, xzr, xzr, !cond
1651 Note that the condition needs to be inverted.
1653 RD is the destination register.
1654 RN and RM are the source registers.
1655 COND is the encoded condition. */
1658 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1660 /* The least significant bit of the condition needs toggling in order to
1662 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
/* Write LEN instructions from BUF into the inferior memory at *TO.
   Advance *TO past the written bytes.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  /* On a big-endian host, byte-swap each instruction word before
     writing it out.  */
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".

   Re-target an unconditional branch for its new location.  If the
   adjusted offset does not fit in the 28-bit branch range, no
   instruction is emitted (INSN_PTR stays put, which the caller detects
   as a relocation failure).  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset adjusted for the move from INSN_ADDR to NEW_ADDR.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".

   Re-target a conditional branch for its new location, falling back to
   a B.cond/B trampoline when the adjusted offset exceeds the 21-bit
   conditional-branch range but still fits an unconditional branch.  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset adjusted for the move from INSN_ADDR to NEW_ADDR.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".

   Re-target a compare-and-branch (CBZ/CBNZ) for its new location,
   falling back to a CB/B trampoline when the adjusted offset exceeds
   the 21-bit range but still fits an unconditional branch.  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset adjusted for the move from INSN_ADDR to NEW_ADDR.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".

   Re-target a test-bit-and-branch (TBZ/TBNZ) for its new location,
   falling back to a TB/B trampoline when the adjusted offset exceeds
   the 16-bit range but still fits an unconditional branch.  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset adjusted for the move from INSN_ADDR to NEW_ADDR.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP compute a PC-relative address; since the original PC is
   known, materialize the absolute result directly with MOV/MOVK
   instead of re-encoding a PC-relative form.  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1),
                                           address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A literal load is PC-relative; replace it by materializing the
   absolute source address in RT, then loading through RT.  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
/* Visitor whose callbacks relocate one instruction while copying it
   into a fast-tracepoint jump pad.  Field order must match
   struct aarch64_insn_visitor.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".

   Build the jump pad at *JUMP_ENTRY: save the full register state on
   the stack, grab the collector spin-lock, call COLLECTOR, release the
   lock, restore state, re-execute the relocated original instruction
   and branch back.  Returns 0 on success, 1 (with a message in ERR) on
   failure.  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31 ... q0                                           | 32 cells
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30 ... x0                                           | 31 cells
	  *------------- Special purpose registers. -------------*
	  | SP | PC | CPSR (NZCV) | FPSR | FPCR                  | 5 cells
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...
       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consist of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...
       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ w2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));
  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
2303 /* Helper function writing LEN instructions from START into
2304 current_insn_ptr. */
2307 emit_ops_insns (const uint32_t *start
, int len
)
2309 CORE_ADDR buildaddr
= current_insn_ptr
;
2312 debug_printf ("Adding %d instrucions at %s\n",
2313 len
, paddress (buildaddr
));
2315 append_insns (&buildaddr
, len
, start
);
2316 current_insn_ptr
= buildaddr
;
2319 /* Pop a register from the stack. */
2322 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2324 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2327 /* Push a register on the stack. */
2330 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2332 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emit a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able refer to value and regs later, we save
     the current stack pointer in the frame pointer.  This way, it is not
     clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".
   Stack-machine add: pops the second operand into x1 and leaves
   x1 + x0 in x0 (top of stack).  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_sub".
   Stack-machine subtract: pops the second operand into x1 and leaves
   x1 - x0 in x0 (top of stack).  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".
   Stack-machine multiply: pops the second operand into x1 and leaves
   x1 * x0 in x0 (top of stack).  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".
   Stack-machine left shift: pops the value into x1 and leaves
   x1 << x0 in x0 (top of stack).  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".
   Stack-machine arithmetic right shift: pops the value into x1 and
   leaves x1 >> x0 (sign-propagating) in x0 (top of stack).  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".
   Stack-machine logical right shift: pops the value into x1 and
   leaves x1 >> x0 (zero-filling) in x0 (top of stack).  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".
   Sign-extend the low ARG bits of x0 (top of stack) in place.  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".
   Pops the second operand into x1 and leaves x0 & x1 in x0.  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".
   Pops the second operand into x1 and leaves x0 | x1 in x0.  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".
   Pops the second operand into x1 and leaves x0 ^ x1 in x0.  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
2561 /* Implementation of emit_ops method "emit_bit_not". */
2564 aarch64_emit_bit_not (void)
2569 p
+= emit_mvn (p
, x0
, x0
);
2571 emit_ops_insns (buf
, p
- buf
);
2574 /* Implementation of emit_ops method "emit_equal". */
2577 aarch64_emit_equal (void)
2582 p
+= emit_pop (p
, x1
);
2583 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2584 p
+= emit_cset (p
, x0
, EQ
);
2586 emit_ops_insns (buf
, p
- buf
);
2589 /* Implementation of emit_ops method "emit_less_signed". */
2592 aarch64_emit_less_signed (void)
2597 p
+= emit_pop (p
, x1
);
2598 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2599 p
+= emit_cset (p
, x0
, LT
);
2601 emit_ops_insns (buf
, p
- buf
);
2604 /* Implementation of emit_ops method "emit_less_unsigned". */
2607 aarch64_emit_less_unsigned (void)
2612 p
+= emit_pop (p
, x1
);
2613 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2614 p
+= emit_cset (p
, x0
, LO
);
2616 emit_ops_insns (buf
, p
- buf
);
2619 /* Implementation of emit_ops method "emit_ref". */
2622 aarch64_emit_ref (int size
)
2630 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2633 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2636 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2639 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2642 /* Unknown size, bail on compilation. */
2647 emit_ops_insns (buf
, p
- buf
);
2650 /* Implementation of emit_ops method "emit_if_goto". */
2653 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2658 /* The Z flag is set or cleared here. */
2659 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2660 /* This instruction must not change the Z flag. */
2661 p
+= emit_pop (p
, x0
);
2662 /* Branch over the next instruction if x0 == 0. */
2663 p
+= emit_bcond (p
, EQ
, 8);
2665 /* The NOP instruction will be patched with an unconditional branch. */
2667 *offset_p
= (p
- buf
) * 4;
2672 emit_ops_insns (buf
, p
- buf
);
2675 /* Implementation of emit_ops method "emit_goto". */
2678 aarch64_emit_goto (int *offset_p
, int *size_p
)
2683 /* The NOP instruction will be patched with an unconditional branch. */
2690 emit_ops_insns (buf
, p
- buf
);
2693 /* Implementation of emit_ops method "write_goto_address". */
2696 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2700 emit_b (&insn
, 0, to
- from
);
2701 append_insns (&from
, 1, &insn
);
2704 /* Implementation of emit_ops method "emit_const". */
2707 aarch64_emit_const (LONGEST num
)
2712 p
+= emit_mov_addr (p
, x0
, num
);
2714 emit_ops_insns (buf
, p
- buf
);
2717 /* Implementation of emit_ops method "emit_call". */
2720 aarch64_emit_call (CORE_ADDR fn
)
2725 p
+= emit_mov_addr (p
, ip0
, fn
);
2726 p
+= emit_blr (p
, ip0
);
2728 emit_ops_insns (buf
, p
- buf
);
2731 /* Implementation of emit_ops method "emit_reg". */
2734 aarch64_emit_reg (int reg
)
2739 /* Set x0 to unsigned char *regs. */
2740 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2741 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2742 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2744 emit_ops_insns (buf
, p
- buf
);
2746 aarch64_emit_call (get_raw_reg_func_addr ());
2749 /* Implementation of emit_ops method "emit_pop". */
2752 aarch64_emit_pop (void)
2757 p
+= emit_pop (p
, x0
);
2759 emit_ops_insns (buf
, p
- buf
);
2762 /* Implementation of emit_ops method "emit_stack_flush". */
2765 aarch64_emit_stack_flush (void)
2770 p
+= emit_push (p
, x0
);
2772 emit_ops_insns (buf
, p
- buf
);
2775 /* Implementation of emit_ops method "emit_zero_ext". */
2778 aarch64_emit_zero_ext (int arg
)
2783 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2785 emit_ops_insns (buf
, p
- buf
);
2788 /* Implementation of emit_ops method "emit_swap". */
2791 aarch64_emit_swap (void)
2796 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2797 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2798 p
+= emit_mov (p
, x0
, register_operand (x1
));
2800 emit_ops_insns (buf
, p
- buf
);
2803 /* Implementation of emit_ops method "emit_stack_adjust". */
2806 aarch64_emit_stack_adjust (int n
)
2808 /* This is not needed with our design. */
2812 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2814 emit_ops_insns (buf
, p
- buf
);
2817 /* Implementation of emit_ops method "emit_int_call_1". */
2820 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2825 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2827 emit_ops_insns (buf
, p
- buf
);
2829 aarch64_emit_call (fn
);
2832 /* Implementation of emit_ops method "emit_void_call_2". */
2835 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2840 /* Push x0 on the stack. */
2841 aarch64_emit_stack_flush ();
2843 /* Setup arguments for the function call:
2846 x1: top of the stack
2851 p
+= emit_mov (p
, x1
, register_operand (x0
));
2852 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2854 emit_ops_insns (buf
, p
- buf
);
2856 aarch64_emit_call (fn
);
2859 aarch64_emit_pop ();
2862 /* Implementation of emit_ops method "emit_eq_goto". */
2865 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2870 p
+= emit_pop (p
, x1
);
2871 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2872 /* Branch over the next instruction if x0 != x1. */
2873 p
+= emit_bcond (p
, NE
, 8);
2874 /* The NOP instruction will be patched with an unconditional branch. */
2876 *offset_p
= (p
- buf
) * 4;
2881 emit_ops_insns (buf
, p
- buf
);
2884 /* Implementation of emit_ops method "emit_ne_goto". */
2887 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2892 p
+= emit_pop (p
, x1
);
2893 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2894 /* Branch over the next instruction if x0 == x1. */
2895 p
+= emit_bcond (p
, EQ
, 8);
2896 /* The NOP instruction will be patched with an unconditional branch. */
2898 *offset_p
= (p
- buf
) * 4;
2903 emit_ops_insns (buf
, p
- buf
);
2906 /* Implementation of emit_ops method "emit_lt_goto". */
2909 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2914 p
+= emit_pop (p
, x1
);
2915 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2916 /* Branch over the next instruction if x0 >= x1. */
2917 p
+= emit_bcond (p
, GE
, 8);
2918 /* The NOP instruction will be patched with an unconditional branch. */
2920 *offset_p
= (p
- buf
) * 4;
2925 emit_ops_insns (buf
, p
- buf
);
2928 /* Implementation of emit_ops method "emit_le_goto". */
2931 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2936 p
+= emit_pop (p
, x1
);
2937 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2938 /* Branch over the next instruction if x0 > x1. */
2939 p
+= emit_bcond (p
, GT
, 8);
2940 /* The NOP instruction will be patched with an unconditional branch. */
2942 *offset_p
= (p
- buf
) * 4;
2947 emit_ops_insns (buf
, p
- buf
);
2950 /* Implementation of emit_ops method "emit_gt_goto". */
2953 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2958 p
+= emit_pop (p
, x1
);
2959 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2960 /* Branch over the next instruction if x0 <= x1. */
2961 p
+= emit_bcond (p
, LE
, 8);
2962 /* The NOP instruction will be patched with an unconditional branch. */
2964 *offset_p
= (p
- buf
) * 4;
2969 emit_ops_insns (buf
, p
- buf
);
2972 /* Implementation of emit_ops method "emit_ge_got". */
2975 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2980 p
+= emit_pop (p
, x1
);
2981 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2982 /* Branch over the next instruction if x0 <= x1. */
2983 p
+= emit_bcond (p
, LT
, 8);
2984 /* The NOP instruction will be patched with an unconditional branch. */
2986 *offset_p
= (p
- buf
) * 4;
2991 emit_ops_insns (buf
, p
- buf
);
2994 static struct emit_ops aarch64_emit_ops_impl
=
2996 aarch64_emit_prologue
,
2997 aarch64_emit_epilogue
,
3002 aarch64_emit_rsh_signed
,
3003 aarch64_emit_rsh_unsigned
,
3005 aarch64_emit_log_not
,
3006 aarch64_emit_bit_and
,
3007 aarch64_emit_bit_or
,
3008 aarch64_emit_bit_xor
,
3009 aarch64_emit_bit_not
,
3011 aarch64_emit_less_signed
,
3012 aarch64_emit_less_unsigned
,
3014 aarch64_emit_if_goto
,
3016 aarch64_write_goto_address
,
3021 aarch64_emit_stack_flush
,
3022 aarch64_emit_zero_ext
,
3024 aarch64_emit_stack_adjust
,
3025 aarch64_emit_int_call_1
,
3026 aarch64_emit_void_call_2
,
3027 aarch64_emit_eq_goto
,
3028 aarch64_emit_ne_goto
,
3029 aarch64_emit_lt_goto
,
3030 aarch64_emit_le_goto
,
3031 aarch64_emit_gt_goto
,
3032 aarch64_emit_ge_got
,
3035 /* Implementation of linux_target_ops method "emit_ops". */
3037 static struct emit_ops
*
3038 aarch64_emit_ops (void)
3040 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* A fast tracepoint needs room for one 4-byte branch instruction.  */
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3060 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3063 aarch64_target::sw_breakpoint_from_kind (int kind
, int *size
)
3065 if (is_64bit_tdesc ())
3067 *size
= aarch64_breakpoint_len
;
3068 return aarch64_breakpoint
;
3071 return arm_sw_breakpoint_from_kind (kind
, size
);
3074 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3077 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3079 if (is_64bit_tdesc ())
3080 return aarch64_breakpoint_len
;
3082 return arm_breakpoint_kind_from_pc (pcptr
);
3085 /* Implementation of the target ops method
3086 "breakpoint_kind_from_current_state". */
3089 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3091 if (is_64bit_tdesc ())
3092 return aarch64_breakpoint_len
;
3094 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3105 struct linux_target_ops the_low_target
=
3107 aarch64_supports_z_point_type
,
3108 aarch64_insert_point
,
3109 aarch64_remove_point
,
3110 aarch64_stopped_by_watchpoint
,
3111 aarch64_stopped_data_address
,
3112 NULL
, /* collect_ptrace_register */
3113 NULL
, /* supply_ptrace_register */
3114 aarch64_linux_siginfo_fixup
,
3115 aarch64_linux_new_process
,
3116 aarch64_linux_delete_process
,
3117 aarch64_linux_new_thread
,
3118 aarch64_linux_delete_thread
,
3119 aarch64_linux_new_fork
,
3120 aarch64_linux_prepare_to_resume
,
3121 NULL
, /* process_qsupported */
3122 aarch64_supports_tracepoints
,
3123 aarch64_get_thread_area
,
3124 aarch64_install_fast_tracepoint_jump_pad
,
3126 aarch64_get_min_fast_tracepoint_insn_len
,
3127 aarch64_supports_range_stepping
,
3128 aarch64_supports_hardware_single_step
,
3129 aarch64_get_syscall_trapinfo
,
3132 /* The linux target ops object. */
3134 linux_process_target
*the_linux_target
= &the_aarch64_target
;
3137 initialize_low_arch (void)
3139 initialize_low_arch_aarch32 ();
3141 initialize_regsets_info (&aarch64_regsets_info
);
3142 initialize_regsets_info (&aarch64_sve_regsets_info
);