1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2018 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
40 #include "gdb_proc_service.h"
41 #include "arch/aarch64.h"
42 #include "linux-aarch64-tdesc.h"
43 #include "nat/aarch64-sve-linux-ptrace.h"
50 /* Per-process arch-specific data we want to keep. */
52 struct arch_process_info
54 /* Hardware breakpoint/watchpoint data.
55 The reason for them to be per-process rather than per-thread is
56 due to the lack of information in the gdbserver environment;
57 gdbserver is not told that whether a requested hardware
58 breakpoint/watchpoint is thread specific or not, so it has to set
59 each hw bp/wp for every thread in the current process. The
60 higher level bp/wp management in gdb will resume a thread if a hw
61 bp/wp trap is not expected for it. Since the hw bp/wp setting is
62 same for each thread, it is reasonable for the data to live here.
64 struct aarch64_debug_reg_state debug_reg_state
;
67 /* Return true if the size of register 0 is 8 byte. */
72 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
74 return register_size (regcache
->tdesc
, 0) == 8;
77 /* Return true if the regcache contains the number of SVE registers. */
82 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
84 return regcache
->tdesc
->reg_defs
.size () == AARCH64_SVE_NUM_REGS
;
87 /* Implementation of linux_target_ops method "cannot_store_register". */
90 aarch64_cannot_store_register (int regno
)
92 return regno
>= AARCH64_NUM_REGS
;
95 /* Implementation of linux_target_ops method "cannot_fetch_register". */
98 aarch64_cannot_fetch_register (int regno
)
100 return regno
>= AARCH64_NUM_REGS
;
104 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
106 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
109 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
110 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
111 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
112 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
113 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
117 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
119 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
122 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
123 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
124 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
125 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
126 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
130 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
132 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
135 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
136 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
137 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
138 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
142 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
144 const struct user_fpsimd_state
*regset
145 = (const struct user_fpsimd_state
*) buf
;
148 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
149 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
150 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
151 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;
158 /* Implementation of linux_target_ops method "get_pc". */
161 aarch64_get_pc (struct regcache
*regcache
)
163 if (register_size (regcache
->tdesc
, 0) == 8)
164 return linux_get_pc_64bit (regcache
);
166 return linux_get_pc_32bit (regcache
);
169 /* Implementation of linux_target_ops method "set_pc". */
172 aarch64_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
174 if (register_size (regcache
->tdesc
, 0) == 8)
175 linux_set_pc_64bit (regcache
, pc
);
177 linux_set_pc_32bit (regcache
, pc
);
180 #define aarch64_breakpoint_len 4
182 /* AArch64 BRK software debug mode instruction.
183 This instruction needs to match gdb/aarch64-tdep.c
184 (aarch64_default_breakpoint). */
185 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
187 /* Implementation of linux_target_ops method "breakpoint_at". */
190 aarch64_breakpoint_at (CORE_ADDR where
)
192 if (is_64bit_tdesc ())
194 gdb_byte insn
[aarch64_breakpoint_len
];
196 (*the_target
->read_memory
) (where
, (unsigned char *) &insn
,
197 aarch64_breakpoint_len
);
198 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
204 return arm_breakpoint_at (where
);
208 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
212 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
214 state
->dr_addr_bp
[i
] = 0;
215 state
->dr_ctrl_bp
[i
] = 0;
216 state
->dr_ref_count_bp
[i
] = 0;
219 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
221 state
->dr_addr_wp
[i
] = 0;
222 state
->dr_ctrl_wp
[i
] = 0;
223 state
->dr_ref_count_wp
[i
] = 0;
227 /* Return the pointer to the debug register state structure in the
228 current process' arch-specific data area. */
230 struct aarch64_debug_reg_state
*
231 aarch64_get_debug_reg_state (pid_t pid
)
233 struct process_info
*proc
= find_process_pid (pid
);
235 return &proc
->priv
->arch_private
->debug_reg_state
;
238 /* Implementation of linux_target_ops method "supports_z_point_type". */
241 aarch64_supports_z_point_type (char z_type
)
247 case Z_PACKET_WRITE_WP
:
248 case Z_PACKET_READ_WP
:
249 case Z_PACKET_ACCESS_WP
:
256 /* Implementation of linux_target_ops method "insert_point".
258 It actually only records the info of the to-be-inserted bp/wp;
259 the actual insertion will happen when threads are resumed. */
262 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
263 int len
, struct raw_breakpoint
*bp
)
266 enum target_hw_bp_type targ_type
;
267 struct aarch64_debug_reg_state
*state
268 = aarch64_get_debug_reg_state (pid_of (current_thread
));
271 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
272 (unsigned long) addr
, len
);
274 /* Determine the type from the raw breakpoint type. */
275 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
277 if (targ_type
!= hw_execute
)
279 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
280 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
281 1 /* is_insert */, state
);
289 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
290 instruction. Set it to 2 to correctly encode length bit
291 mask in hardware/watchpoint control register. */
294 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
295 1 /* is_insert */, state
);
299 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
305 /* Implementation of linux_target_ops method "remove_point".
307 It actually only records the info of the to-be-removed bp/wp,
308 the actual removal will be done when threads are resumed. */
311 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
312 int len
, struct raw_breakpoint
*bp
)
315 enum target_hw_bp_type targ_type
;
316 struct aarch64_debug_reg_state
*state
317 = aarch64_get_debug_reg_state (pid_of (current_thread
));
320 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
321 (unsigned long) addr
, len
);
323 /* Determine the type from the raw breakpoint type. */
324 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
326 /* Set up state pointers. */
327 if (targ_type
!= hw_execute
)
329 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
335 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
336 instruction. Set it to 2 to correctly encode length bit
337 mask in hardware/watchpoint control register. */
340 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
341 0 /* is_insert */, state
);
345 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
351 /* Implementation of linux_target_ops method "stopped_data_address". */
354 aarch64_stopped_data_address (void)
358 struct aarch64_debug_reg_state
*state
;
360 pid
= lwpid_of (current_thread
);
362 /* Get the siginfo. */
363 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
364 return (CORE_ADDR
) 0;
366 /* Need to be a hardware breakpoint/watchpoint trap. */
367 if (siginfo
.si_signo
!= SIGTRAP
368 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
369 return (CORE_ADDR
) 0;
371 /* Check if the address matches any watched address. */
372 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
373 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
375 const unsigned int offset
376 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
377 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
378 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
379 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
380 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
381 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
383 if (state
->dr_ref_count_wp
[i
]
384 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
385 && addr_trap
>= addr_watch_aligned
386 && addr_trap
< addr_watch
+ len
)
388 /* ADDR_TRAP reports the first address of the memory range
389 accessed by the CPU, regardless of what was the memory
390 range watched. Thus, a large CPU access that straddles
391 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
392 ADDR_TRAP that is lower than the
393 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
395 addr: | 4 | 5 | 6 | 7 | 8 |
396 |---- range watched ----|
397 |----------- range accessed ------------|
399 In this case, ADDR_TRAP will be 4.
401 To match a watchpoint known to GDB core, we must never
402 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
403 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
404 positive on kernels older than 4.10. See PR
410 return (CORE_ADDR
) 0;
413 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
416 aarch64_stopped_by_watchpoint (void)
418 if (aarch64_stopped_data_address () != 0)
424 /* Fetch the thread-local storage pointer for libthread_db. */
427 ps_get_thread_area (struct ps_prochandle
*ph
,
428 lwpid_t lwpid
, int idx
, void **base
)
430 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
434 /* Implementation of linux_target_ops method "siginfo_fixup". */
437 aarch64_linux_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
439 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
440 if (!is_64bit_tdesc ())
443 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
446 aarch64_siginfo_from_compat_siginfo (native
,
447 (struct compat_siginfo
*) inf
);
455 /* Implementation of linux_target_ops method "new_process". */
457 static struct arch_process_info
*
458 aarch64_linux_new_process (void)
460 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
462 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
467 /* Implementation of linux_target_ops method "delete_process". */
470 aarch64_linux_delete_process (struct arch_process_info
*info
)
475 /* Implementation of linux_target_ops method "linux_new_fork". */
478 aarch64_linux_new_fork (struct process_info
*parent
,
479 struct process_info
*child
)
481 /* These are allocated by linux_add_process. */
482 gdb_assert (parent
->priv
!= NULL
483 && parent
->priv
->arch_private
!= NULL
);
484 gdb_assert (child
->priv
!= NULL
485 && child
->priv
->arch_private
!= NULL
);
487 /* Linux kernel before 2.6.33 commit
488 72f674d203cd230426437cdcf7dd6f681dad8b0d
489 will inherit hardware debug registers from parent
490 on fork/vfork/clone. Newer Linux kernels create such tasks with
491 zeroed debug registers.
493 GDB core assumes the child inherits the watchpoints/hw
494 breakpoints of the parent, and will remove them all from the
495 forked off process. Copy the debug registers mirrors into the
496 new process so that all breakpoints and watchpoints can be
497 removed together. The debug registers mirror will become zeroed
498 in the end before detaching the forked off process, thus making
499 this compatible with older Linux kernels too. */
501 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
504 /* Implementation of linux_target_ops method "arch_setup". */
507 aarch64_arch_setup (void)
509 unsigned int machine
;
513 tid
= lwpid_of (current_thread
);
515 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
519 uint64_t vq
= aarch64_sve_get_vq (tid
);
520 current_process ()->tdesc
= aarch64_linux_read_description (vq
);
523 current_process ()->tdesc
= tdesc_arm_with_neon
;
525 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
528 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
531 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
533 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
536 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
539 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
541 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
544 static struct regset_info aarch64_regsets
[] =
546 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
547 sizeof (struct user_pt_regs
), GENERAL_REGS
,
548 aarch64_fill_gregset
, aarch64_store_gregset
},
549 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
550 sizeof (struct user_fpsimd_state
), FP_REGS
,
551 aarch64_fill_fpregset
, aarch64_store_fpregset
556 static struct regsets_info aarch64_regsets_info
=
558 aarch64_regsets
, /* regsets */
560 NULL
, /* disabled_regsets */
563 static struct regs_info regs_info_aarch64
=
565 NULL
, /* regset_bitmap */
567 &aarch64_regsets_info
,
570 static struct regset_info aarch64_sve_regsets
[] =
572 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
573 sizeof (struct user_pt_regs
), GENERAL_REGS
,
574 aarch64_fill_gregset
, aarch64_store_gregset
},
575 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
576 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
577 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
582 static struct regsets_info aarch64_sve_regsets_info
=
584 aarch64_sve_regsets
, /* regsets. */
585 0, /* num_regsets. */
586 NULL
, /* disabled_regsets. */
589 static struct regs_info regs_info_aarch64_sve
=
591 NULL
, /* regset_bitmap. */
593 &aarch64_sve_regsets_info
,
596 /* Implementation of linux_target_ops method "regs_info". */
598 static const struct regs_info
*
599 aarch64_regs_info (void)
601 if (!is_64bit_tdesc ())
602 return ®s_info_aarch32
;
605 return ®s_info_aarch64_sve
;
607 return ®s_info_aarch64
;
610 /* Implementation of linux_target_ops method "supports_tracepoints". */
613 aarch64_supports_tracepoints (void)
615 if (current_thread
== NULL
)
619 /* We don't support tracepoints on aarch32 now. */
620 return is_64bit_tdesc ();
624 /* Implementation of linux_target_ops method "get_thread_area". */
627 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
632 iovec
.iov_base
= ®
;
633 iovec
.iov_len
= sizeof (reg
);
635 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
643 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
646 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
648 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
654 collect_register_by_name (regcache
, "x8", &l_sysno
);
655 *sysno
= (int) l_sysno
;
658 collect_register_by_name (regcache
, "r7", sysno
);
661 /* List of condition codes that we need. */
663 enum aarch64_condition_codes
674 enum aarch64_operand_type
680 /* Representation of an operand. At this time, it only supports register
681 and immediate types. */
683 struct aarch64_operand
685 /* Type of the operand. */
686 enum aarch64_operand_type type
;
688 /* Value of the operand according to the type. */
692 struct aarch64_register reg
;
696 /* List of registers that we are currently using, we can add more here as
697 we need to use them. */
699 /* General purpose scratch registers (64 bit). */
700 static const struct aarch64_register x0
= { 0, 1 };
701 static const struct aarch64_register x1
= { 1, 1 };
702 static const struct aarch64_register x2
= { 2, 1 };
703 static const struct aarch64_register x3
= { 3, 1 };
704 static const struct aarch64_register x4
= { 4, 1 };
706 /* General purpose scratch registers (32 bit). */
707 static const struct aarch64_register w0
= { 0, 0 };
708 static const struct aarch64_register w2
= { 2, 0 };
710 /* Intra-procedure scratch registers. */
711 static const struct aarch64_register ip0
= { 16, 1 };
713 /* Special purpose registers. */
714 static const struct aarch64_register fp
= { 29, 1 };
715 static const struct aarch64_register lr
= { 30, 1 };
716 static const struct aarch64_register sp
= { 31, 1 };
717 static const struct aarch64_register xzr
= { 31, 1 };
719 /* Dynamically allocate a new register. If we know the register
720 statically, we should make it a global as above instead of using this
723 static struct aarch64_register
724 aarch64_register (unsigned num
, int is64
)
726 return (struct aarch64_register
) { num
, is64
};
729 /* Helper function to create a register operand, for instructions with
730 different types of operands.
733 p += emit_mov (p, x0, register_operand (x1)); */
735 static struct aarch64_operand
736 register_operand (struct aarch64_register reg
)
738 struct aarch64_operand operand
;
740 operand
.type
= OPERAND_REGISTER
;
746 /* Helper function to create an immediate operand, for instructions with
747 different types of operands.
750 p += emit_mov (p, x0, immediate_operand (12)); */
752 static struct aarch64_operand
753 immediate_operand (uint32_t imm
)
755 struct aarch64_operand operand
;
757 operand
.type
= OPERAND_IMMEDIATE
;
763 /* Helper function to create an offset memory operand.
766 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
768 static struct aarch64_memory_operand
769 offset_memory_operand (int32_t offset
)
771 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
774 /* Helper function to create a pre-index memory operand.
777 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
779 static struct aarch64_memory_operand
780 preindex_memory_operand (int32_t index
)
782 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
785 /* Helper function to create a post-index memory operand.
788 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
790 static struct aarch64_memory_operand
791 postindex_memory_operand (int32_t index
)
793 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
814 /* Write a BLR instruction into *BUF.
818 RN is the register to branch to. */
821 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
823 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
826 /* Write a RET instruction into *BUF.
830 RN is the register to branch to. */
833 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
835 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
839 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
840 struct aarch64_register rt
,
841 struct aarch64_register rt2
,
842 struct aarch64_register rn
,
843 struct aarch64_memory_operand operand
)
850 opc
= ENCODE (2, 2, 30);
852 opc
= ENCODE (0, 2, 30);
854 switch (operand
.type
)
856 case MEMORY_OPERAND_OFFSET
:
858 pre_index
= ENCODE (1, 1, 24);
859 write_back
= ENCODE (0, 1, 23);
862 case MEMORY_OPERAND_POSTINDEX
:
864 pre_index
= ENCODE (0, 1, 24);
865 write_back
= ENCODE (1, 1, 23);
868 case MEMORY_OPERAND_PREINDEX
:
870 pre_index
= ENCODE (1, 1, 24);
871 write_back
= ENCODE (1, 1, 23);
878 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
879 | ENCODE (operand
.index
>> 3, 7, 15)
880 | ENCODE (rt2
.num
, 5, 10)
881 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
884 /* Write a STP instruction into *BUF.
886 STP rt, rt2, [rn, #offset]
887 STP rt, rt2, [rn, #index]!
888 STP rt, rt2, [rn], #index
890 RT and RT2 are the registers to store.
891 RN is the base address register.
892 OFFSET is the immediate to add to the base address. It is limited to a
893 -512 .. 504 range (7 bits << 3). */
896 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
897 struct aarch64_register rt2
, struct aarch64_register rn
,
898 struct aarch64_memory_operand operand
)
900 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
903 /* Write a LDP instruction into *BUF.
905 LDP rt, rt2, [rn, #offset]
906 LDP rt, rt2, [rn, #index]!
907 LDP rt, rt2, [rn], #index
909 RT and RT2 are the registers to store.
910 RN is the base address register.
911 OFFSET is the immediate to add to the base address. It is limited to a
912 -512 .. 504 range (7 bits << 3). */
915 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
916 struct aarch64_register rt2
, struct aarch64_register rn
,
917 struct aarch64_memory_operand operand
)
919 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
922 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
924 LDP qt, qt2, [rn, #offset]
926 RT and RT2 are the Q registers to store.
927 RN is the base address register.
928 OFFSET is the immediate to add to the base address. It is limited to
929 -1024 .. 1008 range (7 bits << 4). */
932 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
933 struct aarch64_register rn
, int32_t offset
)
935 uint32_t opc
= ENCODE (2, 2, 30);
936 uint32_t pre_index
= ENCODE (1, 1, 24);
938 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
939 | ENCODE (offset
>> 4, 7, 15)
940 | ENCODE (rt2
, 5, 10)
941 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
944 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
946 STP qt, qt2, [rn, #offset]
948 RT and RT2 are the Q registers to store.
949 RN is the base address register.
950 OFFSET is the immediate to add to the base address. It is limited to
951 -1024 .. 1008 range (7 bits << 4). */
954 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
955 struct aarch64_register rn
, int32_t offset
)
957 uint32_t opc
= ENCODE (2, 2, 30);
958 uint32_t pre_index
= ENCODE (1, 1, 24);
960 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
961 | ENCODE (offset
>> 4, 7, 15)
962 | ENCODE (rt2
, 5, 10)
963 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
966 /* Write a LDRH instruction into *BUF.
968 LDRH wt, [xn, #offset]
969 LDRH wt, [xn, #index]!
970 LDRH wt, [xn], #index
972 RT is the register to store.
973 RN is the base address register.
974 OFFSET is the immediate to add to the base address. It is limited to
975 0 .. 32760 range (12 bits << 3). */
978 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
979 struct aarch64_register rn
,
980 struct aarch64_memory_operand operand
)
982 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
985 /* Write a LDRB instruction into *BUF.
987 LDRB wt, [xn, #offset]
988 LDRB wt, [xn, #index]!
989 LDRB wt, [xn], #index
991 RT is the register to store.
992 RN is the base address register.
993 OFFSET is the immediate to add to the base address. It is limited to
994 0 .. 32760 range (12 bits << 3). */
997 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
998 struct aarch64_register rn
,
999 struct aarch64_memory_operand operand
)
1001 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1006 /* Write a STR instruction into *BUF.
1008 STR rt, [rn, #offset]
1009 STR rt, [rn, #index]!
1010 STR rt, [rn], #index
1012 RT is the register to store.
1013 RN is the base address register.
1014 OFFSET is the immediate to add to the base address. It is limited to
1015 0 .. 32760 range (12 bits << 3). */
1018 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1019 struct aarch64_register rn
,
1020 struct aarch64_memory_operand operand
)
1022 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1025 /* Helper function emitting an exclusive load or store instruction. */
1028 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1029 enum aarch64_opcodes opcode
,
1030 struct aarch64_register rs
,
1031 struct aarch64_register rt
,
1032 struct aarch64_register rt2
,
1033 struct aarch64_register rn
)
1035 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1036 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1037 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1040 /* Write a LAXR instruction into *BUF.
1044 RT is the destination register.
1045 RN is the base address register. */
1048 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1049 struct aarch64_register rn
)
1051 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1055 /* Write a STXR instruction into *BUF.
1059 RS is the result register, it indicates if the store succeeded or not.
1060 RT is the destination register.
1061 RN is the base address register. */
1064 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1065 struct aarch64_register rt
, struct aarch64_register rn
)
1067 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1071 /* Write a STLR instruction into *BUF.
1075 RT is the register to store.
1076 RN is the base address register. */
1079 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1080 struct aarch64_register rn
)
1082 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1086 /* Helper function for data processing instructions with register sources. */
1089 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1090 struct aarch64_register rd
,
1091 struct aarch64_register rn
,
1092 struct aarch64_register rm
)
1094 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1096 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1097 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1100 /* Helper function for data processing instructions taking either a register
1104 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1105 struct aarch64_register rd
,
1106 struct aarch64_register rn
,
1107 struct aarch64_operand operand
)
1109 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1110 /* The opcode is different for register and immediate source operands. */
1111 uint32_t operand_opcode
;
1113 if (operand
.type
== OPERAND_IMMEDIATE
)
1115 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1116 operand_opcode
= ENCODE (8, 4, 25);
1118 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1119 | ENCODE (operand
.imm
, 12, 10)
1120 | ENCODE (rn
.num
, 5, 5)
1121 | ENCODE (rd
.num
, 5, 0));
1125 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1126 operand_opcode
= ENCODE (5, 4, 25);
1128 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1133 /* Write an ADD instruction into *BUF.
1138 This function handles both an immediate and register add.
1140 RD is the destination register.
1141 RN is the input register.
1142 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1143 OPERAND_REGISTER. */
1146 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1147 struct aarch64_register rn
, struct aarch64_operand operand
)
1149 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1152 /* Write a SUB instruction into *BUF.
1157 This function handles both an immediate and register sub.
1159 RD is the destination register.
1160 RN is the input register.
1161 IMM is the immediate to substract to RN. */
1164 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1165 struct aarch64_register rn
, struct aarch64_operand operand
)
1167 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1170 /* Write a MOV instruction into *BUF.
1175 This function handles both a wide immediate move and a register move,
1176 with the condition that the source register is not xzr. xzr and the
1177 stack pointer share the same encoding and this function only supports
1180 RD is the destination register.
1181 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1182 OPERAND_REGISTER. */
1185 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1186 struct aarch64_operand operand
)
1188 if (operand
.type
== OPERAND_IMMEDIATE
)
1190 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1191 /* Do not shift the immediate. */
1192 uint32_t shift
= ENCODE (0, 2, 21);
1194 return aarch64_emit_insn (buf
, MOV
| size
| shift
1195 | ENCODE (operand
.imm
, 16, 5)
1196 | ENCODE (rd
.num
, 5, 0));
1199 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1202 /* Write a MOVK instruction into *BUF.
1204 MOVK rd, #imm, lsl #shift
1206 RD is the destination register.
1207 IMM is the immediate.
1208 SHIFT is the logical shift left to apply to IMM. */
1211 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1214 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1216 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1217 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1220 /* Write instructions into *BUF in order to move ADDR into a register.
1221 ADDR can be a 64-bit value.
1223 This function will emit a series of MOV and MOVK instructions, such as:
1226 MOVK xd, #(addr >> 16), lsl #16
1227 MOVK xd, #(addr >> 32), lsl #32
1228 MOVK xd, #(addr >> 48), lsl #48 */
1231 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1235 /* The MOV (wide immediate) instruction clears to top bits of the
1237 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1239 if ((addr
>> 16) != 0)
1240 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1244 if ((addr
>> 32) != 0)
1245 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1249 if ((addr
>> 48) != 0)
1250 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm (the result is
   discarded; only the condition flags are updated).

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write a AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write a ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write a ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write a EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm (bitwise NOT of RM).

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   Logical shift left, shift amount taken from register RM.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   Logical (unsigned) shift right, shift amount taken from register RM.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write a ASRV instruction into *BUF.

     ASRV rd, rn, rm

   Arithmetic (signed) shift right, shift amount taken from register RM.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  /* For the 64-bit variant both the sf bit (31) and the N bit (22)
     must be set.  */
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  /* For the 64-bit variant both the sf bit (31) and the N bit (22)
     must be set.  */
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.
   On a big-endian host the buffer is byte-swapped into a temporary
   before being written.  *TO is advanced past the written bytes.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".

   Re-emit an unconditional branch with its offset adjusted for the new
   location.  If the adjusted offset does not fit in the 28-bit branch
   range, no instruction is emitted, which the caller detects as a
   relocation failure.  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".

   Re-emit a conditional branch with its offset adjusted for the new
   location, falling back to a B.cond/B/B sequence when the adjusted
   offset exceeds the 21-bit conditional-branch range.  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".

   Re-emit a compare-and-branch (CBZ/CBNZ) with its offset adjusted,
   falling back to a CB/B/B sequence when the adjusted offset exceeds
   the 21-bit compare-and-branch range.  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".

   Re-emit a test-bit-and-branch (TBZ/TBNZ) with its offset adjusted,
   falling back to a TB/B/B sequence when the adjusted offset exceeds
   the 16-bit test-and-branch range.  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP compute a PC-relative address; since the original PC is
   known, materialize the final address directly with a MOV/MOVK
   sequence instead of relocating the instruction.  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1),
					   address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load is replaced by materializing the literal's
   absolute address into the destination register and loading through
   it.  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".

   Catch-all for instructions with no PC-relative semantics; they can be
   copied verbatim to the new location.  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
/* Visitor table used by aarch64_relocate_instruction to dispatch each
   decoded instruction class to its relocation handler above.  The
   member order must match struct aarch64_insn_visitor.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".

   Build the jump pad at *JUMP_ENTRY: save the full register state on
   the stack, take a spin lock, call COLLECTOR, release the lock,
   restore state, execute the relocated original instruction, and
   branch back to TPADDR + ORIG_SIZE.  Returns 0 on success, 1 with a
   message in ERR on failure.  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      . 32 cells
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      . 31 cells
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
2238 /* Helper function writing LEN instructions from START into
2239 current_insn_ptr. */
2242 emit_ops_insns (const uint32_t *start
, int len
)
2244 CORE_ADDR buildaddr
= current_insn_ptr
;
2247 debug_printf ("Adding %d instrucions at %s\n",
2248 len
, paddress (buildaddr
));
2250 append_insns (&buildaddr
, len
, start
);
2251 current_insn_ptr
= buildaddr
;
/* Pop a register from the stack.  Each stack cell is 16 bytes to keep
   SP 16-byte aligned; the load post-increments SP by one cell.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack.  Each stack cell is 16 bytes to keep
   SP 16-byte aligned; the store pre-decrements SP by one cell.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it is
     not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_epilogue".

   Stores the top of stack (x0) into *value, unwinds the frame set up
   by the prologue and returns expr_eval_no_error to the caller.  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".

   Pops the second operand into x1 and leaves x1 + x0 in x0 (top of
   stack).  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_sub".

   Pops the minuend into x1 and leaves x1 - x0 in x0 (top of stack).  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".

   Pops the second factor into x1 and leaves x1 * x0 in x0 (top of
   stack).  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".

   Pops the value into x1 and leaves x1 << x0 in x0 (top of stack).  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".

   Pops the value into x1 and leaves the arithmetic shift x1 >> x0 in
   x0 (top of stack).  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".

   Pops the value into x1 and leaves the logical shift x1 >> x0 in x0
   (top of stack).  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".

   Sign-extend the low ARG bits of the top of stack (x0) in place.  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".

   Pops the second operand into x1 and leaves x0 & x1 in x0 (top of
   stack).  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".

   Pops the second operand into x1 and leaves x0 | x1 in x0 (top of
   stack).  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".

   Pops the second operand into x1 and leaves x0 ^ x1 in x0 (top of
   stack).  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".

   Replaces the top of stack (x0) with its bitwise complement.  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".

   Pops the second operand into x1 and leaves (x0 == x1) as 1/0 in x0
   (top of stack).  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".

   Pops the first operand into x1 and leaves the signed comparison
   (x1 < x0) as 1/0 in x0 (top of stack).  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".

   Pops the first operand into x1 and leaves the unsigned comparison
   (x1 < x0) as 1/0 in x0 (top of stack).  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".

   Dereference the address in x0, replacing it with SIZE bytes read
   from memory (zero-extended for sub-64-bit sizes).  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".

   Emit a conditional jump taken when the top of stack is non-zero.
   The branch target is unknown at this point; a NOP placeholder is
   emitted and its position/size reported through *OFFSET_P/*SIZE_P so
   write_goto_address can patch it later.  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".

   Emit an unconditional-branch placeholder; the NOP sits at offset 0 of
   this sequence and is patched later via write_goto_address.  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
2628 /* Implementation of emit_ops method "write_goto_address". */
2631 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2635 emit_b (&insn
, 0, to
- from
);
2636 append_insns (&from
, 1, &insn
);
2639 /* Implementation of emit_ops method "emit_const". */
2642 aarch64_emit_const (LONGEST num
)
2647 p
+= emit_mov_addr (p
, x0
, num
);
2649 emit_ops_insns (buf
, p
- buf
);
2652 /* Implementation of emit_ops method "emit_call". */
2655 aarch64_emit_call (CORE_ADDR fn
)
2660 p
+= emit_mov_addr (p
, ip0
, fn
);
2661 p
+= emit_blr (p
, ip0
);
2663 emit_ops_insns (buf
, p
- buf
);
2666 /* Implementation of emit_ops method "emit_reg". */
2669 aarch64_emit_reg (int reg
)
2674 /* Set x0 to unsigned char *regs. */
2675 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2676 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2677 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2679 emit_ops_insns (buf
, p
- buf
);
2681 aarch64_emit_call (get_raw_reg_func_addr ());
2684 /* Implementation of emit_ops method "emit_pop". */
2687 aarch64_emit_pop (void)
2692 p
+= emit_pop (p
, x0
);
2694 emit_ops_insns (buf
, p
- buf
);
2697 /* Implementation of emit_ops method "emit_stack_flush". */
2700 aarch64_emit_stack_flush (void)
2705 p
+= emit_push (p
, x0
);
2707 emit_ops_insns (buf
, p
- buf
);
2710 /* Implementation of emit_ops method "emit_zero_ext". */
2713 aarch64_emit_zero_ext (int arg
)
2718 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2720 emit_ops_insns (buf
, p
- buf
);
2723 /* Implementation of emit_ops method "emit_swap". */
2726 aarch64_emit_swap (void)
2731 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2732 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2733 p
+= emit_mov (p
, x0
, register_operand (x1
));
2735 emit_ops_insns (buf
, p
- buf
);
2738 /* Implementation of emit_ops method "emit_stack_adjust". */
2741 aarch64_emit_stack_adjust (int n
)
2743 /* This is not needed with our design. */
2747 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2749 emit_ops_insns (buf
, p
- buf
);
2752 /* Implementation of emit_ops method "emit_int_call_1". */
2755 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2760 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2762 emit_ops_insns (buf
, p
- buf
);
2764 aarch64_emit_call (fn
);
2767 /* Implementation of emit_ops method "emit_void_call_2". */
2770 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2775 /* Push x0 on the stack. */
2776 aarch64_emit_stack_flush ();
2778 /* Setup arguments for the function call:
2781 x1: top of the stack
2786 p
+= emit_mov (p
, x1
, register_operand (x0
));
2787 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2789 emit_ops_insns (buf
, p
- buf
);
2791 aarch64_emit_call (fn
);
2794 aarch64_emit_pop ();
2797 /* Implementation of emit_ops method "emit_eq_goto". */
2800 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2805 p
+= emit_pop (p
, x1
);
2806 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2807 /* Branch over the next instruction if x0 != x1. */
2808 p
+= emit_bcond (p
, NE
, 8);
2809 /* The NOP instruction will be patched with an unconditional branch. */
2811 *offset_p
= (p
- buf
) * 4;
2816 emit_ops_insns (buf
, p
- buf
);
2819 /* Implementation of emit_ops method "emit_ne_goto". */
2822 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2827 p
+= emit_pop (p
, x1
);
2828 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2829 /* Branch over the next instruction if x0 == x1. */
2830 p
+= emit_bcond (p
, EQ
, 8);
2831 /* The NOP instruction will be patched with an unconditional branch. */
2833 *offset_p
= (p
- buf
) * 4;
2838 emit_ops_insns (buf
, p
- buf
);
2841 /* Implementation of emit_ops method "emit_lt_goto". */
2844 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2849 p
+= emit_pop (p
, x1
);
2850 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2851 /* Branch over the next instruction if x0 >= x1. */
2852 p
+= emit_bcond (p
, GE
, 8);
2853 /* The NOP instruction will be patched with an unconditional branch. */
2855 *offset_p
= (p
- buf
) * 4;
2860 emit_ops_insns (buf
, p
- buf
);
2863 /* Implementation of emit_ops method "emit_le_goto". */
2866 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2871 p
+= emit_pop (p
, x1
);
2872 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2873 /* Branch over the next instruction if x0 > x1. */
2874 p
+= emit_bcond (p
, GT
, 8);
2875 /* The NOP instruction will be patched with an unconditional branch. */
2877 *offset_p
= (p
- buf
) * 4;
2882 emit_ops_insns (buf
, p
- buf
);
2885 /* Implementation of emit_ops method "emit_gt_goto". */
2888 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2893 p
+= emit_pop (p
, x1
);
2894 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2895 /* Branch over the next instruction if x0 <= x1. */
2896 p
+= emit_bcond (p
, LE
, 8);
2897 /* The NOP instruction will be patched with an unconditional branch. */
2899 *offset_p
= (p
- buf
) * 4;
2904 emit_ops_insns (buf
, p
- buf
);
2907 /* Implementation of emit_ops method "emit_ge_got". */
2910 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2915 p
+= emit_pop (p
, x1
);
2916 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2917 /* Branch over the next instruction if x0 <= x1. */
2918 p
+= emit_bcond (p
, LT
, 8);
2919 /* The NOP instruction will be patched with an unconditional branch. */
2921 *offset_p
= (p
- buf
) * 4;
2926 emit_ops_insns (buf
, p
- buf
);
2929 static struct emit_ops aarch64_emit_ops_impl
=
2931 aarch64_emit_prologue
,
2932 aarch64_emit_epilogue
,
2937 aarch64_emit_rsh_signed
,
2938 aarch64_emit_rsh_unsigned
,
2940 aarch64_emit_log_not
,
2941 aarch64_emit_bit_and
,
2942 aarch64_emit_bit_or
,
2943 aarch64_emit_bit_xor
,
2944 aarch64_emit_bit_not
,
2946 aarch64_emit_less_signed
,
2947 aarch64_emit_less_unsigned
,
2949 aarch64_emit_if_goto
,
2951 aarch64_write_goto_address
,
2956 aarch64_emit_stack_flush
,
2957 aarch64_emit_zero_ext
,
2959 aarch64_emit_stack_adjust
,
2960 aarch64_emit_int_call_1
,
2961 aarch64_emit_void_call_2
,
2962 aarch64_emit_eq_goto
,
2963 aarch64_emit_ne_goto
,
2964 aarch64_emit_lt_goto
,
2965 aarch64_emit_le_goto
,
2966 aarch64_emit_gt_goto
,
2967 aarch64_emit_ge_got
,
2970 /* Implementation of linux_target_ops method "emit_ops". */
2972 static struct emit_ops
*
2973 aarch64_emit_ops (void)
2975 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".

   A fast tracepoint is installed by replacing a single 4-byte AArch64
   instruction with a branch.  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".

   Range stepping (vCont;r) is supported on AArch64.  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
2995 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2997 static const gdb_byte
*
2998 aarch64_sw_breakpoint_from_kind (int kind
, int *size
)
3000 if (is_64bit_tdesc ())
3002 *size
= aarch64_breakpoint_len
;
3003 return aarch64_breakpoint
;
3006 return arm_sw_breakpoint_from_kind (kind
, size
);
3009 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3012 aarch64_breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3014 if (is_64bit_tdesc ())
3015 return aarch64_breakpoint_len
;
3017 return arm_breakpoint_kind_from_pc (pcptr
);
3020 /* Implementation of the linux_target_ops method
3021 "breakpoint_kind_from_current_state". */
3024 aarch64_breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3026 if (is_64bit_tdesc ())
3027 return aarch64_breakpoint_len
;
3029 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.

   AArch64 Linux exposes hardware single-stepping via ptrace.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3040 struct linux_target_ops the_low_target
=
3044 aarch64_cannot_fetch_register
,
3045 aarch64_cannot_store_register
,
3046 NULL
, /* fetch_register */
3049 aarch64_breakpoint_kind_from_pc
,
3050 aarch64_sw_breakpoint_from_kind
,
3051 NULL
, /* get_next_pcs */
3052 0, /* decr_pc_after_break */
3053 aarch64_breakpoint_at
,
3054 aarch64_supports_z_point_type
,
3055 aarch64_insert_point
,
3056 aarch64_remove_point
,
3057 aarch64_stopped_by_watchpoint
,
3058 aarch64_stopped_data_address
,
3059 NULL
, /* collect_ptrace_register */
3060 NULL
, /* supply_ptrace_register */
3061 aarch64_linux_siginfo_fixup
,
3062 aarch64_linux_new_process
,
3063 aarch64_linux_delete_process
,
3064 aarch64_linux_new_thread
,
3065 aarch64_linux_delete_thread
,
3066 aarch64_linux_new_fork
,
3067 aarch64_linux_prepare_to_resume
,
3068 NULL
, /* process_qsupported */
3069 aarch64_supports_tracepoints
,
3070 aarch64_get_thread_area
,
3071 aarch64_install_fast_tracepoint_jump_pad
,
3073 aarch64_get_min_fast_tracepoint_insn_len
,
3074 aarch64_supports_range_stepping
,
3075 aarch64_breakpoint_kind_from_current_state
,
3076 aarch64_supports_hardware_single_step
,
3077 aarch64_get_syscall_trapinfo
,
3081 initialize_low_arch (void)
3083 initialize_low_arch_aarch32 ();
3085 initialize_regsets_info (&aarch64_regsets_info
);
3086 initialize_regsets_info (&aarch64_sve_regsets_info
);
3089 initialize_low_tdesc ();