/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

protected:

  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache's target description contains the SVE
   registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

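/* Note on the naming convention used above and below: the fill_* routines
   copy register contents from the regcache into a ptrace regset buffer,
   while the store_* routines do the reverse, copying a regset buffer read
   from the kernel back into the regcache.  */
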
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

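/* Read little-endian, these four bytes are the instruction BRK #0
   (0xd4200000), which traps to the debugger with a SIGTRAP.  */
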
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
                               aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                     0 /* is_insert */, state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what memory range was
             watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

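/* Note: linux_get_hwcap reads the AT_HWCAP entry from the inferior's
   auxiliary vector (the 8 above is the word size, in bytes, of the 64-bit
   inferior).  The PACA bit advertises kernel support for pointer
   authentication, which is what gates the pauth feature in the target
   description selected above.  */
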
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

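/* The NT_ARM_PAC_MASK entries above are marked OPTIONAL_REGS: older
   kernels do not provide that regset, and a failed PTRACE_GETREGSET for
   an optional regset is tolerated rather than reported as an error.  */
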
/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  These values follow the standard
   AArch64 condition encodings.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

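/* Each enumerator above packs the (op0, op1, CRn, CRm, op2) fields that
   name the system register, in the layout that MRS/MSR expect: the whole
   value is later inserted as a single 15-bit field at bit 5 (see emit_mrs
   and emit_msr below).  */
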
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

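/* ENCODE (VAL, SIZE, OFFSET), from arch/aarch64-insn.h, masks VAL to its
   low SIZE bits and shifts it left by OFFSET, i.e. it places VAL into a
   SIZE-bit wide instruction field starting at bit OFFSET.  For instance,
   ENCODE (rn.num, 5, 5) above puts the register number into bits 9:5 of
   the BLR encoding.  */
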
/* Write a RET instruction into *BUF.

     RET rn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

/* Helper function emitting a load or store pair instruction.  */

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

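/* The pair offset is encoded as a scaled 7-bit signed immediate:
   operand.index >> 3 above drops the low three bits, so the byte offset
   must be a multiple of 8 and fall within -512 .. 504 (and, scaled by 16,
   within -1024 .. 1008 for the Q-register variants further down).  */
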
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

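/* LDAXR (load-acquire exclusive) and STXR (store exclusive) are used as a
   pair further down to build the jump pad's spin lock: the load marks the
   address as exclusively monitored and the store only succeeds, writing 0
   to the status register RS, if nothing else touched the location in
   between.  */
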
/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate as their source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the sp register.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

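/* For example (illustrative values), moving the address 0x0000007fb7d2a150
   into x0 emits:

     MOV  x0, #0xa150
     MOVK x0, #0xb7d2, lsl #16
     MOVK x0, #0x7f, lsl #32

   The final MOVK is skipped because bits 63:48 of this address are zero.  */
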
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

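/* In the AArch64 condition encoding, each code and its inverse differ only
   in the least significant bit (e.g. EQ is 0b0000 and NE is 0b0001), which
   is why toggling bit 0 above is enough to invert the condition for the
   CSINC-based CSET alias.  */
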
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

1669 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1670 struct aarch64_insn_data
*data
)
1672 struct aarch64_insn_relocation_data
*insn_reloc
1673 = (struct aarch64_insn_relocation_data
*) data
;
1675 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1677 if (can_encode_int32 (new_offset
, 28))
1678 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
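/* The adjusted offset above is computed as

     new_offset = (insn_addr + offset) - new_addr

   i.e. the original branch target (old PC plus old displacement)
   re-expressed relative to the address the copied instruction will execute
   from.  The same arithmetic is used by the other relocation visitors
   below.  */
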
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

       */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

       */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN  ; xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN          ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

       */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

   */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

1879 /* Implementation of linux_target_ops method
1880 "install_fast_tracepoint_jump_pad". */
1883 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1885 CORE_ADDR collector
,
1888 CORE_ADDR
*jump_entry
,
1889 CORE_ADDR
*trampoline
,
1890 ULONGEST
*trampoline_size
,
1891 unsigned char *jjump_pad_insn
,
1892 ULONGEST
*jjump_pad_insn_size
,
1893 CORE_ADDR
*adjusted_insn_addr
,
1894 CORE_ADDR
*adjusted_insn_addr_end
,
1902 CORE_ADDR buildaddr
= *jump_entry
;
1903 struct aarch64_insn_relocation_data insn_data
;
1905 /* We need to save the current state on the stack both to restore it
1906 later and to collect register values when the tracepoint is hit.
1908 The saved registers are pushed in a layout that needs to be in sync
1909 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1910 the supply_fast_tracepoint_registers function will fill in the
1911 register cache from a pointer to saved registers on the stack we build
1914 For simplicity, we set the size of each cell on the stack to 16 bytes.
1915 This way one cell can hold any register type, from system registers
1916 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1917 has to be 16 bytes aligned anyway.
1919 Note that the CPSR register does not exist on AArch64. Instead we
1920 can access system bits describing the process state with the
1921 MRS/MSR instructions, namely the condition flags. We save them as
1922 if they are part of a CPSR register because that's how GDB
1923 interprets these system bits. At the moment, only the condition
1924 flags are saved in CPSR (NZCV).
1926 Stack layout, each cell is 16 bytes (descending):
1928 High *-------- SIMD&FP registers from 31 down to 0. --------*
1934 *---- General purpose registers from 30 down to 0. ----*
1940 *------------- Special purpose registers. -------------*
1943 | CPSR (NZCV) | 5 cells
1946 *------------- collecting_t object --------------------*
1947 | TPIDR_EL0 | struct tracepoint * |
1948 Low *------------------------------------------------------*
1950 After this stack is set up, we issue a call to the collector, passing
1951 it the saved registers at (SP + 16). */
1953 /* Push SIMD&FP registers on the stack:
1955 SUB sp, sp, #(32 * 16)
1957 STP q30, q31, [sp, #(30 * 16)]
1962 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1963 for (i
= 30; i
>= 0; i
-= 2)
1964 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1966 /* Push general purpose registers on the stack. Note that we do not need
1967 to push x31 as it represents the xzr register and not the stack
1968 pointer in a STR instruction.
1970 SUB sp, sp, #(31 * 16)
1972 STR x30, [sp, #(30 * 16)]
1977 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1978 for (i
= 30; i
>= 0; i
-= 1)
1979 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
1980 offset_memory_operand (i
* 16));
1982 /* Make space for 5 more cells.
1984 SUB sp, sp, #(5 * 16)
1987 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
1992 ADD x4, sp, #((32 + 31 + 5) * 16)
1993 STR x4, [sp, #(4 * 16)]
1996 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
1997 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
1999 /* Save PC (tracepoint address):
2004 STR x3, [sp, #(3 * 16)]
2008 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2009 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2011 /* Save CPSR (NZCV), FPSR and FPCR:
2017 STR x2, [sp, #(2 * 16)]
2018 STR x1, [sp, #(1 * 16)]
2019 STR x0, [sp, #(0 * 16)]
2022 p
+= emit_mrs (p
, x2
, NZCV
);
2023 p
+= emit_mrs (p
, x1
, FPSR
);
2024 p
+= emit_mrs (p
, x0
, FPCR
);
2025 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2026 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2027 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2029 /* Push the collecting_t object. It consist of the address of the
2030 tracepoint and an ID for the current thread. We get the latter by
2031 reading the tpidr_el0 system register. It corresponds to the
2032 NT_ARM_TLS register accessible with ptrace.
2039 STP x0, x1, [sp, #-16]!
2043 p
+= emit_mov_addr (p
, x0
, tpoint
);
2044 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2045 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2049 The shared memory for the lock is at lockaddr. It will hold zero
2050 if no-one is holding the lock, otherwise it contains the address of
2051 the collecting_t object on the stack of the thread which acquired it.
2053 At this stage, the stack pointer points to this thread's collecting_t
2056 We use the following registers:
2057 - x0: Address of the lock.
2058 - x1: Pointer to collecting_t object.
2059 - x2: Scratch register.
2065 ; Trigger an event local to this core. So the following WFE
2066 ; instruction is ignored.
2069 ; Wait for an event. The event is triggered by either the SEVL
2070 ; or STLR instructions (store release).
2073 ; Atomically read at lockaddr. This marks the memory location as
2074 ; exclusive. This instruction also has memory constraints which
2075 ; make sure all previous data reads and writes are done before
2079 ; Try again if another thread holds the lock.
2082 ; We can lock it! Write the address of the collecting_t object.
2083 ; This instruction will fail if the memory location is not marked
2084 ; as exclusive anymore. If it succeeds, it will remove the
2085 ; exclusive mark on the memory location. This way, if another
2086 ; thread executes this instruction before us, we will fail and try
2093 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2094 p
+= emit_mov (p
, x1
, register_operand (sp
));
2098 p
+= emit_ldaxr (p
, x2
, x0
);
2099 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2100 p
+= emit_stxr (p
, w2
, x1
, x0
);
2101 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2103 /* Call collector (struct tracepoint *, unsigned char *):
2108 ; Saved registers start after the collecting_t object.
2111 ; We use an intra-procedure-call scratch register.
2112 MOV ip0, #(collector)
2115 ; And call back to C!
2120 p
+= emit_mov_addr (p
, x0
, tpoint
);
2121 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2123 p
+= emit_mov_addr (p
, ip0
, collector
);
2124 p
+= emit_blr (p
, ip0
);
2126 /* Release the lock.
2131 ; This instruction is a normal store with memory ordering
2132 ; constraints. Thanks to this we do not have to put a data
2133 ; barrier instruction to make sure all data read and writes are done
2134 ; before this instruction is executed. Furthermore, this instruction
2135 ; will trigger an event, letting other threads know they can grab
2140 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2141 p
+= emit_stlr (p
, xzr
, x0
);
2143 /* Free collecting_t object:
2148 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2150 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2151 registers from the stack.
2153 LDR x2, [sp, #(2 * 16)]
2154 LDR x1, [sp, #(1 * 16)]
2155 LDR x0, [sp, #(0 * 16)]
2161 ADD sp, sp #(5 * 16)
2164 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2165 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2166 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2167 p
+= emit_msr (p
, NZCV
, x2
);
2168 p
+= emit_msr (p
, FPSR
, x1
);
2169 p
+= emit_msr (p
, FPCR
, x0
);
2171 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2173 /* Pop general purpose registers:
2177 LDR x30, [sp, #(30 * 16)]
2179 ADD sp, sp, #(31 * 16)
2182 for (i
= 0; i
<= 30; i
+= 1)
2183 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2184 offset_memory_operand (i
* 16));
2185 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2187 /* Pop SIMD&FP registers:
2191 LDP q30, q31, [sp, #(30 * 16)]
2193 ADD sp, sp, #(32 * 16)
2196 for (i
= 0; i
<= 30; i
+= 2)
2197 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2198 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  debug_printf ("Adding %d instructions at %s\n",
                len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
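/* Each stack slot is 16 bytes even though only 8 bytes are stored, so
   that SP stays 16-byte aligned as the AArch64 ABI requires.  The
   generated instructions are roughly (illustrative):

     emit_pop  rt:  LDR rt, [sp], #16      ; post-indexed load
     emit_push rt:  STR rt, [sp, #-16]!    ; pre-indexed store  */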
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the address where the result of evaluating the
     expression is stored; it will be set to whatever is on top of the
     stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                    |
          | FP                                                    | <- FP
          | x1  (ULONGEST *value)                                 |
          | x0  (unsigned char *regs)                             |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
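/* For reference, the prologue above assembles to roughly the following
   (fp is x29, lr is x30; illustrative only):

     STP x0, x1, [sp, #-32]!   ; push the regs and value arguments
     STR x30, [sp, #24]        ; save LR
     STR x29, [sp, #16]        ; save FP
     ADD x29, sp, #16          ; point FP at the saved FP slot  */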
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
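/* The epilogue mirrors the prologue's frame layout: x1 is pointed at the
   saved "value" slot (FP - 8) and dereferenced to recover the
   ULONGEST *value argument, through which the expression result in x0 is
   stored.  SP is then rewound past the frame (FP + 16), FP and LR are
   reloaded with a single LDP, and the code returns expr_eval_no_error.  */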
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
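/* The two-operand emitters below all follow the same pattern as
   emit_add: pop the earlier operand into x1, combine it with the top of
   the stack held in x0, and leave the result in x0.  For emit_add this
   is roughly:

     LDR x1, [sp], #16    ; pop the second operand
     ADD x0, x1, x0       ; new top of stack  */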
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
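/* Logical not in two instructions (illustrative):

     CMP  x0, #0     ; sets the Z flag when the top of the stack is zero
     CSET x0, eq     ; x0 = Z ? 1 : 0  */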
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
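/* emit_ref dereferences the pointer on top of the stack in place: the
   size argument selects the load width, e.g. LDRB w0, [x0] for 1 byte,
   LDRH w0, [x0] for 2, LDR w0, [x0] for 4 and LDR x0, [x0] for 8; the
   narrow loads zero-extend into x0.  */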
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
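/* The goto support works by emitting a NOP placeholder and reporting its
   byte offset and size back through OFFSET_P and SIZE_P; once the
   destination is known, write_goto_address patches the NOP with a real
   branch.  For emit_if_goto the emitted sequence is roughly:

     CMP x0, #0          ; test the condition value
     LDR x0, [sp], #16   ; pop, leaving the flags untouched
     B.EQ +8             ; condition false: skip the goto
     NOP                 ; later patched with "B <target>"  */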
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
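/* emit_reg recovers the "regs" argument saved by the prologue: FP - 16
   is the stack slot holding the unsigned char *regs pointer, which is
   loaded into x0, the requested register number goes into x1, and the
   get_raw_reg helper is called; per the AArch64 calling convention its
   return value lands in x0, i.e. on top of the expression stack.  */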
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
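/* Swap exchanges x0 (top of stack) with the entry just below it without
   moving SP (illustrative):

     LDR x1, [sp]    ; old second entry
     STR x0, [sp]    ; old top becomes the second entry
     MOV x0, x1      ; old second entry becomes the top  */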
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
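/* Because x0 doubles as the top-of-stack register, emit_void_call_2 has
   to preserve it across the call: the current top of the stack is
   flushed to memory first, the callee receives arg1 in x0 and the old
   top of the stack in x1, and the saved value is popped back into x0
   once the call returns.  */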
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
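/* The remaining compare-and-goto emitters share this shape: pop the
   earlier operand into x1, compare it against the top of the stack in
   x0, branch over the following NOP when the condition does not hold,
   and report the NOP's offset so write_goto_address can later patch it
   into the actual goto.  Only the condition code differs.  */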
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}