/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
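
/* A note on direction, following gdbserver's regset convention: the
   fill_* routines above copy values out of the regcache into the ptrace
   buffer (used before PTRACE_SETREGSET writes it to the inferior), while
   the store_* routines copy a buffer obtained with PTRACE_GETREGSET back
   into the regcache.  A minimal sketch of the round trip, assuming an
   attached thread's regcache:

     struct user_pt_regs regs;
     aarch64_fill_gregset (regcache, &regs);     regcache -> regs
     aarch64_store_gregset (regcache, &regs);    regs -> regcache
*/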
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to the regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */

extern int debug_threads;
/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}
/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
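
/* For reference: these bytes are the little-endian encoding of BRK #0
   (0xd4200000); the 16-bit immediate field in bits [20:5] is zero.  */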
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}
/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}
/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)
/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};
static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
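
/* Background for the register names used above: the Linux syscall ABI
   passes the syscall number in x8 for AArch64 tasks and in r7 for
   AArch32 (compat) tasks, hence the two different register names.  */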
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
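
/* A sketch of how these values line up with the instruction encoding,
   assuming the ENCODE semantics used throughout this file: emit_mrs
   below places this 15-bit op0:op1:CRn:CRm:op2 value in bits [19:5] of
   the instruction word, so for example

     p += emit_mrs (p, x1, TPIDR_EL0);    MRS x1, tpidr_el0

   reads the software thread ID register into x1, as the jump pad code
   further down does.  */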
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
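
/* ENCODE (from arch/aarch64-insn.h) is expected to place a value into a
   bitfield of the given width at the given bit offset; for instance
   ENCODE (rn.num, 5, 5) above puts the register number into bits [9:5]
   of the instruction word, the Rn slot of the BLR encoding.  */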
/* Write a RET instruction into *BUF.

   RET rn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
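
/* Usage sketch, as done by the jump pad code below: push a pair of
   registers with pre-indexed write-back,

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

   which emits STP x0, x1, [sp, #-16]!.  */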
/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write an LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}
/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}
/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
1218 MOVK rd, #imm, lsl #shift
1220 RD is the destination register.
1221 IMM is the immediate.
1222 SHIFT is the logical shift left to apply to IMM. */
1225 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1228 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1230 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1231 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
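
/* Worked example: for ADDR 0x0000123456789abc the emitted sequence is

     MOV  xd, #0x9abc
     MOVK xd, #0x5678, lsl #16
     MOVK xd, #0x1234, lsl #32

   and the final MOVK is skipped because bits [63:48] are zero.  */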
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write an MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
/* Write an MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write an SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
/* Write an SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
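
/* For example, emit_sbfx (p, x0, x1, 8, 4) extracts bits [11:8] of x1
   into x0 with sign extension, since it expands to
   SBFM x0, x1, #8, #(8 + 4 - 1).  */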
/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  RN is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         B.COND TAKEN    ; If cond is true, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 |
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);

  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);
  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);
  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));
  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);
  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);
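  /* The 28-bit limit comes from the encoding of B: a signed 26-bit word
     offset, i.e. a 28-bit byte offset once scaled by the 4-byte
     instruction size, for a reach of +/-128 MiB around the branch.  */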
  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
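/* Note that emit_pop and emit_push use 16-byte slots even though only
   one 8-byte register is transferred: AArch64 expects SP to stay
   16-byte aligned whenever it is used as the base of a load or store
   (hardware SP-alignment checking is normally enabled), so the
   bytecode stack grows and shrinks in 16-byte steps.  */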
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
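/* The binary operations below all follow the same stack-machine
   convention as emit_add above: x0 caches the top of the stack, the
   second operand is popped into x1, and the result is left in x0,
   e.g. x0 = x1 + x0 for "add".  */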
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
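/* The three narrower loads above target w0, and writes to a W register
   zero the upper 32 bits, so 1-, 2- and 4-byte references are
   zero-extended into x0; only the 8-byte case loads x0 directly.  */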
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
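/* Taken together, emit_goto, emit_if_goto and write_goto_address form
   a small fix-up protocol: the emitters plant a NOP and report its
   byte offset through *offset_p, and once the bytecode destination is
   known, write_goto_address overwrites that NOP with an unconditional
   B covering the TO - FROM displacement.  */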
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
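/* The stack_flush/pop pair above keeps the expression's top-of-stack
   value live across the call: x0 is spilled before the arguments are
   set up and restored once FN returns.  */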
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
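/* The remaining conditional gotos follow the emit_eq_goto shape: pop
   the first operand into x1, compare it against the top of the stack
   in x0, and step over the patchable NOP with the inverted condition,
   so the goto is only taken when the tested relation holds.  */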
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
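/* The 64-bit case above hands back aarch64_breakpoint, the 4-byte BRK
   instruction; 32-bit (AArch32) processes defer to the arm
   implementation, which also knows about the Thumb breakpoint kinds.  */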
/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}