/* GNU/Linux/AArch64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
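
/* For reference, these four bytes are the little-endian encoding of
   0xd4200000, i.e. "BRK #0".  */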

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len
	= aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned
	= align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
720 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
722 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
728 collect_register_by_name (regcache
, "x8", &l_sysno
);
729 *sysno
= (int) l_sysno
;
732 collect_register_by_name (regcache
, "r7", sysno
);

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
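
/* Each value above packs the MRS/MSR system register operand fields
   (o0, op1, CRn, CRm, op2) into the 15-bit field that emit_mrs and
   emit_msr below place at bit 5 of the instruction word; e.g.
   TPIDR_EL0 corresponds to S3_3_C13_C0_2 in assembler notation.  */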

/* Write a BLR instruction into *BUF.

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
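
/* Throughout the emit_* helpers, ENCODE (see arch/aarch64-insn.h) is
   used to place a field value of a given bit width at a given bit
   offset of the instruction word; that is how register numbers and
   immediates are packed into the opcodes below.  */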

/* Helper function emitting a load or store pair instruction.  */

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   RS is the result register, it indicates if the store succeeded or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

/* Write a SUBS instruction into *BUF.

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     obtain the inverted condition.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
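
/* Note: AArch64 condition codes come in complementary pairs that differ
   only in their least significant bit, which is why flipping that bit
   in emit_cset above yields the logically opposite condition.  */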

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, which stores information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
1717 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1718 struct aarch64_insn_data
*data
)
1720 struct aarch64_insn_relocation_data
*insn_reloc
1721 = (struct aarch64_insn_relocation_data
*) data
;
1723 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1725 if (can_encode_int32 (new_offset
, 28))
1726 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1729 /* Implementation of aarch64_insn_visitor method "b_cond". */
1732 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1733 struct aarch64_insn_data
*data
)
1735 struct aarch64_insn_relocation_data
*insn_reloc
1736 = (struct aarch64_insn_relocation_data
*) data
;
1738 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1740 if (can_encode_int32 (new_offset
, 21))
1742 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1745 else if (can_encode_int32 (new_offset
, 28))
1747 /* The offset is out of range for a conditional branch
1748 instruction but not for a unconditional branch. We can use
1749 the following instructions instead:
1751 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1752 B NOT_TAKEN ; Else jump over TAKEN and continue.
1759 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1760 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1761 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
       */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
       */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
1843 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1845 struct aarch64_insn_data
*data
)
1847 struct aarch64_insn_relocation_data
*insn_reloc
1848 = (struct aarch64_insn_relocation_data
*) data
;
1849 /* We know exactly the address the ADR{P,} instruction will compute.
1850 We can just write it to the destination register. */
1851 CORE_ADDR address
= data
->insn_addr
+ offset
;
1855 /* Clear the lower 12 bits of the offset to get the 4K page. */
1856 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1857 aarch64_register (rd
, 1),
1861 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1862 aarch64_register (rd
, 1), address
);

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

   */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  |                  q31 ... q0                          | 32 cells
	  *---- General purpose registers from 30 down to 0. ----*
	  |                  x30 ... x0                          | 31 cells
	  *------------- Special purpose registers. -------------*
	  |        SP | PC | CPSR (NZCV) | FPSR | FPCR           | 5 cells
	  *------------- collecting_t object --------------------*
	  |        TPIDR_EL0        | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
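
  /* At this point SP has been lowered by exactly (32 + 31 + 5) * 16
     bytes, so the ADD above reconstructs the stack pointer the inferior
     had when the tracepoint was hit; that original SP is what gets
     recorded in the SP cell.  */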

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  /* again: */
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);
2191 /* Free collecting_t object:
2196 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

     LDR x0, [sp, #(0 * 16)]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));
  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp, #(0 * 16)]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
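
/* To summarize, the jump pad assembled by the function above has the
   following overall shape (an outline for readability, not literal
   output):

     save q0..q31, x0..x30, then NZCV/FPSR/FPCR   ; inferior state
     push { tpoint, tpidr_el0 }                   ; collecting_t object
     spin-lock on lockaddr                        ; serialize collectors
     x0 = tpoint, x1 = address of the saved registers
     call collector                               ; back into C
     store-release zero to lockaddr               ; unlock
     restore NZCV/FPSR/FPCR, x0..x30, q0..q31
     <relocated copy of the original instruction at TPADDR>
     b back to TPADDR + ORIG_SIZE

   The caller then patches the original instruction with the single
   branch written into JJUMP_PAD_INSN.  */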
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
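
/* Note that each element of the expression stack occupies a full 16 bytes
   even though the values themselves are 64-bit: SP must stay 16-byte
   aligned for SP-relative accesses on AArch64.  The two helpers above
   therefore expand to, roughly:

     emit_pop  (buf, rt)  ->  LDR rt, [sp], #16
     emit_push (buf, rt)  ->  STR rt, [sp, #-16]!
  */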
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument points to where the result of evaluating the expression
     will be stored; it is set to whatever is on top of the stack at the
     end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
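
/* The prologue emitted above corresponds, roughly, to:

     stp x0, x1, [sp, #-32]!    ; regs and value below the frame record
     str x30, [sp, #24]         ; saved LR
     str x29, [sp, #16]         ; saved FP
     add x29, sp, #16           ; FP now points at the frame record

   so that *value is always reachable at [x29, #-8] and regs at
   [x29, #-16], however much the compiled expression pushes later.  */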
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
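
/* Roughly, the epilogue above assembles to:

     sub x1, x29, #8            ; address of the saved value pointer
     ldr x1, [x1]
     str x0, [x1]               ; *value = top of stack
     add sp, x29, #16           ; drop whatever the expression pushed
     ldp x29, x30, [x29]        ; restore the caller's FP and LR
     mov x0, #expr_eval_no_error
     ret
  */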
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
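
/* All of the two-operand emitters above (and the bitwise and comparison
   ones below) share one pattern: the right operand is the current top of
   stack in x0, the left operand is popped into x1, and the result
   replaces the top of stack in x0.  For instance aarch64_emit_sub
   compiles "a - b" into, roughly:

     ldr x1, [sp], #16          ; x1 = a (pushed first)
     sub x0, x1, x0             ; x0 = a - b

   leaving the result exactly where the next opcode expects it.  */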
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
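
/* The three functions above cooperate to implement jumps whose target is
   not known when the jump is emitted: "emit_if_goto" and "emit_goto"
   leave a NOP as a placeholder and report its offset and size back to
   the common bytecode compiler, which later calls "write_goto_address"
   to overwrite that NOP with a real B instruction once the destination
   address is known.  Schematically, on the caller's side (which lives in
   the generic agent-expression code, not in this file):

     aarch64_emit_if_goto (&offset, &size);
     ... more opcodes are compiled, the target "to" becomes known ...
     aarch64_write_goto_address (start_of_op + offset, to, size);
  */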
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
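
/* In other words, "emit_reg" reloads the raw register buffer pointer that
   the prologue stashed at [x29, #-16], puts the register number in x1,
   and branches to the in-process agent's get_raw_reg helper, whose C
   signature is roughly:

     ULONGEST get_raw_reg (const unsigned char *raw_regs, int regnum);

   Its return value comes back in x0, i.e. directly on top of the
   expression stack.  */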
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
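
/* Since every element of the expression stack occupies 16 bytes (see
   emit_pop/emit_push above), discarding N elements is a single
   "ADD sp, sp, #(N * 16)"; the cached top of stack in x0 is left
   untouched.  */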
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
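
/* Note the ordering above: x1 (the top-of-stack argument) must be copied
   out of x0 before x0 is overwritten with ARG1.  The emitted sequence is
   roughly:

     str x0, [sp, #-16]!        ; keep the top of stack live across the call
     mov x1, x0                 ; second argument: current top of stack
     mov x0, #arg1              ; first argument
     mov ip0, #fn
     blr ip0
     ldr x0, [sp], #16          ; restore the top of stack
  */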
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};
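
/* This table is what the generic agent-expression compiler retrieves
   through the "emit_ops" hook below: every bytecode in a tracepoint
   condition or collection expression is translated by calling one of the
   emitters above, so the whole expression ends up as native AArch64 code
   in the jump pad instead of being interpreted at collection time.  */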
/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}