/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len
	= aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned
	= align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
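/* Worked example for the matching above (illustrative only, not part
   of the upstream sources): for a 2-byte watchpoint on 0x1006, the
   hardware register pair is typically programmed with the 8-byte
   aligned base 0x1000 and byte-select bits covering offsets 6..7.
   OFFSET is then 6, ADDR_WATCH is 0x1000 + 6 = 0x1006 and
   ADDR_WATCH_ALIGNED is 0x1000, so a trap whose SI_ADDR falls in
   [0x1000, 0x1008) is accepted for that register.  */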
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
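/* Worked example (illustrative only): with the field values above,
   NZCV evaluates to (0x1 << 14) | (0x3 << 11) | (0x4 << 7)
   | (0x2 << 3) = 0x5a10.  emit_mrs below places this value at bits
   5..19 of the instruction, so, assuming an MRS base opcode of
   0xd5300000 (defined in arch/aarch64-insn.h), "MRS x0, NZCV"
   assembles to 0xd5300000 | (0x5a10 << 5) = 0xd53b4200.  */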
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
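/* Worked example (illustrative only): ENCODE (val, size, offset)
   masks VAL to SIZE bits and shifts it up to bit OFFSET.  Assuming
   the BLR base opcode 0xd63f0000 from arch/aarch64-insn.h,
   emit_blr (buf, ip0) therefore encodes "BLR x16" as
   0xd63f0000 | (16 << 5) = 0xd63f0200.  */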
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
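/* Note on the encoding above (illustrative): the imm7 field of a
   load/store pair instruction holds the byte offset scaled down by
   the access size, hence "operand.index >> 3" for 8-byte registers.
   For example, "STP x0, x1, [sp, #-16]!" carries a byte offset of
   -16, which is encoded as the 7-bit field value -2.  */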
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
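/* For example (illustrative), both source operand kinds go through
   emit_data_processing above:

     p += emit_add (p, x1, sp, immediate_operand (16));   immediate form
     p += emit_add (p, x0, x0, register_operand (x1));    register form  */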
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
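/* Worked example (illustrative only): for ADDR == 0x12345678 the
   sequence emitted by emit_mov_addr is

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16

   and the two higher MOVKs are skipped since (addr >> 32) == 0.  */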
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally selects rn or rm + 1 and places the
   result in rd: rn is chosen if the condition is true, rm + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
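/* Worked example (illustrative only): assuming the condition code
   values defined above, emit_cset (p, x0, EQ) toggles EQ (0x0) into
   NE (0x1) and emits "CSINC x0, xzr, xzr, NE", i.e. x0 becomes 1
   when the Z flag is set and 0 otherwise.  */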
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
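/* Typical usage of append_insns (illustrative sketch only): build a
   short sequence in a local buffer, then flush it to the inferior at
   some CORE_ADDR buildaddr, which is advanced past the written bytes:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&buildaddr, p - buf, buf);  */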
/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and saves
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:
	   ...  */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:
	   ...  */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN   ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN           ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:
	   ...  */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31 ... q0                                           |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30 ... x0                                           |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...

     again:
       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     wait_for_lock:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ w2, wait_for_lock

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
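  /* The emitted sequence is roughly equivalent to the following C
     (illustrative sketch only, using hypothetical helpers standing in
     for the exclusive-access instructions):

       do
	 {
	   while ((val = load_acquire_exclusive (lockaddr)) != 0)
	     wait_for_event ();				  // WFE
	 }
       while (!store_exclusive (lockaddr, collecting_t_sp)); // STXR/CBNZ  */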
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2185 /* Write the code into the inferior memory. */
2186 append_insns (&buildaddr
, p
- buf
, buf
);
2188 /* Now emit the relocated instruction. */
2189 *adjusted_insn_addr
= buildaddr
;
2190 target_read_uint32 (tpaddr
, &insn
);
2192 insn_data
.base
.insn_addr
= tpaddr
;
2193 insn_data
.new_addr
= buildaddr
;
2194 insn_data
.insn_ptr
= buf
;
2196 aarch64_relocate_instruction (insn
, &visitor
,
2197 (struct aarch64_insn_data
*) &insn_data
);
2199 /* We may not have been able to relocate the instruction. */
2200 if (insn_data
.insn_ptr
== buf
)
2203 "E.Could not relocate instruction from %s to %s.",
2204 core_addr_to_string_nz (tpaddr
),
2205 core_addr_to_string_nz (buildaddr
));
2209 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2210 *adjusted_insn_addr_end
= buildaddr
;
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);
  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
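/* A note on the two helpers above: each slot of the compiled expression's
   stack is 16 bytes wide even though only 8 bytes carry data, which keeps
   SP 16-byte aligned as AArch64 requires.  In effect they emit:

     LDR rt, [sp], #16    ; emit_pop
     STR rt, [sp, #-16]!  ; emit_push  */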
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
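/* In effect, the prologue above assembles to:

     STP x0, x1, [sp, #-32]!
     STR x30, [sp, #24]       ; save LR
     STR x29, [sp, #16]       ; save FP
     ADD x29, sp, #16         ; FP points at the saved-FP slot  */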
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
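/* In the epilogue above, FP - 8 is the slot where the prologue saved x1,
   the ULONGEST *value argument; loading it back and storing x0 through it
   implements "*value = top of stack".  The LDP then restores the caller's
   FP and LR before returning.  */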
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
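/* The binary operations here all follow the same pattern: the second
   operand is popped into x1, the first operand is already cached in x0
   (the top of the stack), and the result replaces x0.  For "add" the
   emitted code is, in effect:

     LDR x1, [sp], #16
     ADD x0, x1, x0  */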
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
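/* In effect: CMP x0, #0 followed by CSET x0, eq, so x0 becomes 1 when the
   old top of stack was zero and 0 otherwise.  */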
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
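/* The only difference from emit_less_signed above is the condition code:
   LO (unsigned lower) instead of LT (signed less than).  */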
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
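/* For example, a 4-byte reference emits, in effect, LDR w0, [x0]: the
   address on top of the stack is replaced by the 32-bit value it points
   to, zero-extended to 64 bits.  */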
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
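/* The (*offset_p, *size_p) pair records where the NOP placeholder lives
   in the emitted code; once the goto target is known, the bytecode
   compiler patches that slot with a real branch via
   aarch64_write_goto_address below.  */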
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
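/* In aarch64_emit_reg above, FP - 16 is the slot where the prologue saved
   x0, the raw register buffer, so the callee receives the buffer in x0
   and the register number in x1.  */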
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
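/* In effect:

     LDR x1, [sp]    ; second-from-top
     STR x0, [sp]    ; old top into its slot
     MOV x0, x1      ; second-from-top becomes the new top  */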
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};
/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}