1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2015 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
40 #include "gdb_proc_service.h"
42 /* Defined in auto-generated files. */
43 void init_registers_aarch64 (void);
44 extern const struct target_desc
*tdesc_aarch64
;
50 #define AARCH64_X_REGS_NUM 31
51 #define AARCH64_V_REGS_NUM 32
52 #define AARCH64_X0_REGNO 0
53 #define AARCH64_SP_REGNO 31
54 #define AARCH64_PC_REGNO 32
55 #define AARCH64_CPSR_REGNO 33
56 #define AARCH64_V0_REGNO 34
57 #define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
58 #define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)
60 #define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
62 /* Per-process arch-specific data we want to keep. */
64 struct arch_process_info
66 /* Hardware breakpoint/watchpoint data.
67 The reason for them to be per-process rather than per-thread is
68 due to the lack of information in the gdbserver environment;
69 gdbserver is not told that whether a requested hardware
70 breakpoint/watchpoint is thread specific or not, so it has to set
71 each hw bp/wp for every thread in the current process. The
72 higher level bp/wp management in gdb will resume a thread if a hw
73 bp/wp trap is not expected for it. Since the hw bp/wp setting is
74 same for each thread, it is reasonable for the data to live here.
76 struct aarch64_debug_reg_state debug_reg_state
;
79 /* Return true if the size of register 0 is 8 byte. */
84 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
86 return register_size (regcache
->tdesc
, 0) == 8;
89 /* Implementation of linux_target_ops method "cannot_store_register". */
92 aarch64_cannot_store_register (int regno
)
94 return regno
>= AARCH64_NUM_REGS
;
97 /* Implementation of linux_target_ops method "cannot_fetch_register". */
100 aarch64_cannot_fetch_register (int regno
)
102 return regno
>= AARCH64_NUM_REGS
;
106 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
108 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
111 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
112 collect_register (regcache
, AARCH64_X0_REGNO
+ i
, ®set
->regs
[i
]);
113 collect_register (regcache
, AARCH64_SP_REGNO
, ®set
->sp
);
114 collect_register (regcache
, AARCH64_PC_REGNO
, ®set
->pc
);
115 collect_register (regcache
, AARCH64_CPSR_REGNO
, ®set
->pstate
);
119 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
121 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
124 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
125 supply_register (regcache
, AARCH64_X0_REGNO
+ i
, ®set
->regs
[i
]);
126 supply_register (regcache
, AARCH64_SP_REGNO
, ®set
->sp
);
127 supply_register (regcache
, AARCH64_PC_REGNO
, ®set
->pc
);
128 supply_register (regcache
, AARCH64_CPSR_REGNO
, ®set
->pstate
);
132 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
134 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
137 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
138 collect_register (regcache
, AARCH64_V0_REGNO
+ i
, ®set
->vregs
[i
]);
139 collect_register (regcache
, AARCH64_FPSR_REGNO
, ®set
->fpsr
);
140 collect_register (regcache
, AARCH64_FPCR_REGNO
, ®set
->fpcr
);
144 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
146 const struct user_fpsimd_state
*regset
147 = (const struct user_fpsimd_state
*) buf
;
150 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
151 supply_register (regcache
, AARCH64_V0_REGNO
+ i
, ®set
->vregs
[i
]);
152 supply_register (regcache
, AARCH64_FPSR_REGNO
, ®set
->fpsr
);
153 supply_register (regcache
, AARCH64_FPCR_REGNO
, ®set
->fpcr
);
156 /* Enable miscellaneous debugging output. The name is historical - it
157 was originally used to debug LinuxThreads support. */
158 extern int debug_threads
;
160 /* Implementation of linux_target_ops method "get_pc". */
163 aarch64_get_pc (struct regcache
*regcache
)
165 if (register_size (regcache
->tdesc
, 0) == 8)
169 collect_register_by_name (regcache
, "pc", &pc
);
171 debug_printf ("stop pc is %08lx\n", pc
);
178 collect_register_by_name (regcache
, "pc", &pc
);
180 debug_printf ("stop pc is %04x\n", pc
);
185 /* Implementation of linux_target_ops method "set_pc". */
188 aarch64_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
190 if (register_size (regcache
->tdesc
, 0) == 8)
192 unsigned long newpc
= pc
;
193 supply_register_by_name (regcache
, "pc", &newpc
);
197 unsigned int newpc
= pc
;
198 supply_register_by_name (regcache
, "pc", &newpc
);
202 #define aarch64_breakpoint_len 4
204 /* AArch64 BRK software debug mode instruction.
205 This instruction needs to match gdb/aarch64-tdep.c
206 (aarch64_default_breakpoint). */
207 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
209 /* Implementation of linux_target_ops method "breakpoint_at". */
212 aarch64_breakpoint_at (CORE_ADDR where
)
214 gdb_byte insn
[aarch64_breakpoint_len
];
216 (*the_target
->read_memory
) (where
, (unsigned char *) &insn
,
217 aarch64_breakpoint_len
);
218 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
225 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
229 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
231 state
->dr_addr_bp
[i
] = 0;
232 state
->dr_ctrl_bp
[i
] = 0;
233 state
->dr_ref_count_bp
[i
] = 0;
236 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
238 state
->dr_addr_wp
[i
] = 0;
239 state
->dr_ctrl_wp
[i
] = 0;
240 state
->dr_ref_count_wp
[i
] = 0;
244 /* Return the pointer to the debug register state structure in the
245 current process' arch-specific data area. */
247 struct aarch64_debug_reg_state
*
248 aarch64_get_debug_reg_state (pid_t pid
)
250 struct process_info
*proc
= find_process_pid (pid
);
252 return &proc
->priv
->arch_private
->debug_reg_state
;
255 /* Implementation of linux_target_ops method "supports_z_point_type". */
258 aarch64_supports_z_point_type (char z_type
)
264 if (!extended_protocol
&& is_64bit_tdesc ())
266 /* Only enable Z0 packet in non-multi-arch debugging. If
267 extended protocol is used, don't enable Z0 packet because
268 GDBserver may attach to 32-bit process. */
273 /* Disable Z0 packet so that GDBserver doesn't have to handle
274 different breakpoint instructions (aarch64, arm, thumb etc)
275 in multi-arch debugging. */
280 case Z_PACKET_WRITE_WP
:
281 case Z_PACKET_READ_WP
:
282 case Z_PACKET_ACCESS_WP
:
289 /* Implementation of linux_target_ops method "insert_point".
291 It actually only records the info of the to-be-inserted bp/wp;
292 the actual insertion will happen when threads are resumed. */
295 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
296 int len
, struct raw_breakpoint
*bp
)
299 enum target_hw_bp_type targ_type
;
300 struct aarch64_debug_reg_state
*state
301 = aarch64_get_debug_reg_state (pid_of (current_thread
));
304 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
305 (unsigned long) addr
, len
);
307 /* Determine the type from the raw breakpoint type. */
308 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
310 if (targ_type
!= hw_execute
)
312 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
313 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
314 1 /* is_insert */, state
);
322 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
323 instruction. Set it to 2 to correctly encode length bit
324 mask in hardware/watchpoint control register. */
327 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
328 1 /* is_insert */, state
);
332 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
338 /* Implementation of linux_target_ops method "remove_point".
340 It actually only records the info of the to-be-removed bp/wp,
341 the actual removal will be done when threads are resumed. */
344 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
345 int len
, struct raw_breakpoint
*bp
)
348 enum target_hw_bp_type targ_type
;
349 struct aarch64_debug_reg_state
*state
350 = aarch64_get_debug_reg_state (pid_of (current_thread
));
353 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
354 (unsigned long) addr
, len
);
356 /* Determine the type from the raw breakpoint type. */
357 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
359 /* Set up state pointers. */
360 if (targ_type
!= hw_execute
)
362 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
368 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
369 instruction. Set it to 2 to correctly encode length bit
370 mask in hardware/watchpoint control register. */
373 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
374 0 /* is_insert */, state
);
378 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
384 /* Implementation of linux_target_ops method "stopped_data_address". */
387 aarch64_stopped_data_address (void)
391 struct aarch64_debug_reg_state
*state
;
393 pid
= lwpid_of (current_thread
);
395 /* Get the siginfo. */
396 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
397 return (CORE_ADDR
) 0;
399 /* Need to be a hardware breakpoint/watchpoint trap. */
400 if (siginfo
.si_signo
!= SIGTRAP
401 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
402 return (CORE_ADDR
) 0;
404 /* Check if the address matches any watched address. */
405 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
406 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
408 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
409 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
410 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
];
411 if (state
->dr_ref_count_wp
[i
]
412 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
413 && addr_trap
>= addr_watch
414 && addr_trap
< addr_watch
+ len
)
418 return (CORE_ADDR
) 0;
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
432 /* Fetch the thread-local storage pointer for libthread_db. */
435 ps_get_thread_area (const struct ps_prochandle
*ph
,
436 lwpid_t lwpid
, int idx
, void **base
)
438 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
442 /* Implementation of linux_target_ops method "siginfo_fixup". */
445 aarch64_linux_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
447 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
448 if (!is_64bit_tdesc ())
451 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
454 aarch64_siginfo_from_compat_siginfo (native
,
455 (struct compat_siginfo
*) inf
);
463 /* Implementation of linux_target_ops method "linux_new_process". */
465 static struct arch_process_info
*
466 aarch64_linux_new_process (void)
468 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
470 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
475 /* Implementation of linux_target_ops method "linux_new_fork". */
478 aarch64_linux_new_fork (struct process_info
*parent
,
479 struct process_info
*child
)
481 /* These are allocated by linux_add_process. */
482 gdb_assert (parent
->priv
!= NULL
483 && parent
->priv
->arch_private
!= NULL
);
484 gdb_assert (child
->priv
!= NULL
485 && child
->priv
->arch_private
!= NULL
);
487 /* Linux kernel before 2.6.33 commit
488 72f674d203cd230426437cdcf7dd6f681dad8b0d
489 will inherit hardware debug registers from parent
490 on fork/vfork/clone. Newer Linux kernels create such tasks with
491 zeroed debug registers.
493 GDB core assumes the child inherits the watchpoints/hw
494 breakpoints of the parent, and will remove them all from the
495 forked off process. Copy the debug registers mirrors into the
496 new process so that all breakpoints and watchpoints can be
497 removed together. The debug registers mirror will become zeroed
498 in the end before detaching the forked off process, thus making
499 this compatible with older Linux kernels too. */
501 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
504 /* Return the right target description according to the ELF file of
507 static const struct target_desc
*
508 aarch64_linux_read_description (void)
510 unsigned int machine
;
514 tid
= lwpid_of (current_thread
);
516 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
519 return tdesc_aarch64
;
521 return tdesc_arm_with_neon
;
524 /* Implementation of linux_target_ops method "arch_setup". */
527 aarch64_arch_setup (void)
529 current_process ()->tdesc
= aarch64_linux_read_description ();
531 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
534 static struct regset_info aarch64_regsets
[] =
536 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
537 sizeof (struct user_pt_regs
), GENERAL_REGS
,
538 aarch64_fill_gregset
, aarch64_store_gregset
},
539 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
540 sizeof (struct user_fpsimd_state
), FP_REGS
,
541 aarch64_fill_fpregset
, aarch64_store_fpregset
546 static struct regsets_info aarch64_regsets_info
=
548 aarch64_regsets
, /* regsets */
550 NULL
, /* disabled_regsets */
553 static struct regs_info regs_info_aarch64
=
555 NULL
, /* regset_bitmap */
557 &aarch64_regsets_info
,
560 /* Implementation of linux_target_ops method "regs_info". */
562 static const struct regs_info
*
563 aarch64_regs_info (void)
565 if (is_64bit_tdesc ())
566 return ®s_info_aarch64
;
568 return ®s_info_aarch32
;
571 /* Implementation of linux_target_ops method "supports_tracepoints". */
574 aarch64_supports_tracepoints (void)
576 if (current_thread
== NULL
)
580 /* We don't support tracepoints on aarch32 now. */
581 return is_64bit_tdesc ();
585 /* Implementation of linux_target_ops method "get_thread_area". */
588 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
593 iovec
.iov_base
= ®
;
594 iovec
.iov_len
= sizeof (reg
);
596 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
604 /* List of condition codes that we need. */
606 enum aarch64_condition_codes
617 enum aarch64_operand_type
623 /* Representation of an operand. At this time, it only supports register
624 and immediate types. */
626 struct aarch64_operand
628 /* Type of the operand. */
629 enum aarch64_operand_type type
;
631 /* Value of the operand according to the type. */
635 struct aarch64_register reg
;
639 /* List of registers that we are currently using, we can add more here as
640 we need to use them. */
642 /* General purpose scratch registers (64 bit). */
643 static const struct aarch64_register x0
= { 0, 1 };
644 static const struct aarch64_register x1
= { 1, 1 };
645 static const struct aarch64_register x2
= { 2, 1 };
646 static const struct aarch64_register x3
= { 3, 1 };
647 static const struct aarch64_register x4
= { 4, 1 };
649 /* General purpose scratch registers (32 bit). */
650 static const struct aarch64_register w0
= { 0, 0 };
651 static const struct aarch64_register w2
= { 2, 0 };
653 /* Intra-procedure scratch registers. */
654 static const struct aarch64_register ip0
= { 16, 1 };
656 /* Special purpose registers. */
657 static const struct aarch64_register fp
= { 29, 1 };
658 static const struct aarch64_register lr
= { 30, 1 };
659 static const struct aarch64_register sp
= { 31, 1 };
660 static const struct aarch64_register xzr
= { 31, 1 };
662 /* Dynamically allocate a new register. If we know the register
663 statically, we should make it a global as above instead of using this
666 static struct aarch64_register
667 aarch64_register (unsigned num
, int is64
)
669 return (struct aarch64_register
) { num
, is64
};
672 /* Helper function to create a register operand, for instructions with
673 different types of operands.
676 p += emit_mov (p, x0, register_operand (x1)); */
678 static struct aarch64_operand
679 register_operand (struct aarch64_register reg
)
681 struct aarch64_operand operand
;
683 operand
.type
= OPERAND_REGISTER
;
689 /* Helper function to create an immediate operand, for instructions with
690 different types of operands.
693 p += emit_mov (p, x0, immediate_operand (12)); */
695 static struct aarch64_operand
696 immediate_operand (uint32_t imm
)
698 struct aarch64_operand operand
;
700 operand
.type
= OPERAND_IMMEDIATE
;
706 /* Helper function to create an offset memory operand.
709 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
711 static struct aarch64_memory_operand
712 offset_memory_operand (int32_t offset
)
714 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
717 /* Helper function to create a pre-index memory operand.
720 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
722 static struct aarch64_memory_operand
723 preindex_memory_operand (int32_t index
)
725 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
728 /* Helper function to create a post-index memory operand.
731 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
733 static struct aarch64_memory_operand
734 postindex_memory_operand (int32_t index
)
736 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
757 /* Write a BLR instruction into *BUF.
761 RN is the register to branch to. */
764 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
766 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
769 /* Write a RET instruction into *BUF.
773 RN is the register to branch to. */
776 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
778 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
782 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
783 struct aarch64_register rt
,
784 struct aarch64_register rt2
,
785 struct aarch64_register rn
,
786 struct aarch64_memory_operand operand
)
793 opc
= ENCODE (2, 2, 30);
795 opc
= ENCODE (0, 2, 30);
797 switch (operand
.type
)
799 case MEMORY_OPERAND_OFFSET
:
801 pre_index
= ENCODE (1, 1, 24);
802 write_back
= ENCODE (0, 1, 23);
805 case MEMORY_OPERAND_POSTINDEX
:
807 pre_index
= ENCODE (0, 1, 24);
808 write_back
= ENCODE (1, 1, 23);
811 case MEMORY_OPERAND_PREINDEX
:
813 pre_index
= ENCODE (1, 1, 24);
814 write_back
= ENCODE (1, 1, 23);
821 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
822 | ENCODE (operand
.index
>> 3, 7, 15)
823 | ENCODE (rt2
.num
, 5, 10)
824 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
827 /* Write a STP instruction into *BUF.
829 STP rt, rt2, [rn, #offset]
830 STP rt, rt2, [rn, #index]!
831 STP rt, rt2, [rn], #index
833 RT and RT2 are the registers to store.
834 RN is the base address register.
835 OFFSET is the immediate to add to the base address. It is limited to a
836 -512 .. 504 range (7 bits << 3). */
839 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
840 struct aarch64_register rt2
, struct aarch64_register rn
,
841 struct aarch64_memory_operand operand
)
843 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
846 /* Write a LDP instruction into *BUF.
848 LDP rt, rt2, [rn, #offset]
849 LDP rt, rt2, [rn, #index]!
850 LDP rt, rt2, [rn], #index
852 RT and RT2 are the registers to store.
853 RN is the base address register.
854 OFFSET is the immediate to add to the base address. It is limited to a
855 -512 .. 504 range (7 bits << 3). */
858 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
859 struct aarch64_register rt2
, struct aarch64_register rn
,
860 struct aarch64_memory_operand operand
)
862 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
865 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
867 LDP qt, qt2, [rn, #offset]
869 RT and RT2 are the Q registers to store.
870 RN is the base address register.
871 OFFSET is the immediate to add to the base address. It is limited to
872 -1024 .. 1008 range (7 bits << 4). */
875 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
876 struct aarch64_register rn
, int32_t offset
)
878 uint32_t opc
= ENCODE (2, 2, 30);
879 uint32_t pre_index
= ENCODE (1, 1, 24);
881 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
882 | ENCODE (offset
>> 4, 7, 15)
883 | ENCODE (rt2
, 5, 10)
884 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
887 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
889 STP qt, qt2, [rn, #offset]
891 RT and RT2 are the Q registers to store.
892 RN is the base address register.
893 OFFSET is the immediate to add to the base address. It is limited to
894 -1024 .. 1008 range (7 bits << 4). */
897 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
898 struct aarch64_register rn
, int32_t offset
)
900 uint32_t opc
= ENCODE (2, 2, 30);
901 uint32_t pre_index
= ENCODE (1, 1, 24);
903 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
904 | ENCODE (offset
>> 4, 7, 15)
905 | ENCODE (rt2
, 5, 10)
906 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
909 /* Write a LDRH instruction into *BUF.
911 LDRH wt, [xn, #offset]
912 LDRH wt, [xn, #index]!
913 LDRH wt, [xn], #index
915 RT is the register to store.
916 RN is the base address register.
917 OFFSET is the immediate to add to the base address. It is limited to
918 0 .. 32760 range (12 bits << 3). */
921 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
922 struct aarch64_register rn
,
923 struct aarch64_memory_operand operand
)
925 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
928 /* Write a LDRB instruction into *BUF.
930 LDRB wt, [xn, #offset]
931 LDRB wt, [xn, #index]!
932 LDRB wt, [xn], #index
934 RT is the register to store.
935 RN is the base address register.
936 OFFSET is the immediate to add to the base address. It is limited to
937 0 .. 32760 range (12 bits << 3). */
940 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
941 struct aarch64_register rn
,
942 struct aarch64_memory_operand operand
)
944 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
949 /* Write a STR instruction into *BUF.
951 STR rt, [rn, #offset]
952 STR rt, [rn, #index]!
955 RT is the register to store.
956 RN is the base address register.
957 OFFSET is the immediate to add to the base address. It is limited to
958 0 .. 32760 range (12 bits << 3). */
961 emit_str (uint32_t *buf
, struct aarch64_register rt
,
962 struct aarch64_register rn
,
963 struct aarch64_memory_operand operand
)
965 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
968 /* Helper function emitting an exclusive load or store instruction. */
971 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
972 enum aarch64_opcodes opcode
,
973 struct aarch64_register rs
,
974 struct aarch64_register rt
,
975 struct aarch64_register rt2
,
976 struct aarch64_register rn
)
978 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
979 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
980 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
983 /* Write a LAXR instruction into *BUF.
987 RT is the destination register.
988 RN is the base address register. */
991 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
992 struct aarch64_register rn
)
994 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
998 /* Write a STXR instruction into *BUF.
1002 RS is the result register, it indicates if the store succeeded or not.
1003 RT is the destination register.
1004 RN is the base address register. */
1007 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1008 struct aarch64_register rt
, struct aarch64_register rn
)
1010 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1014 /* Write a STLR instruction into *BUF.
1018 RT is the register to store.
1019 RN is the base address register. */
1022 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1023 struct aarch64_register rn
)
1025 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1029 /* Helper function for data processing instructions with register sources. */
1032 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1033 struct aarch64_register rd
,
1034 struct aarch64_register rn
,
1035 struct aarch64_register rm
)
1037 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1039 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1040 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1043 /* Helper function for data processing instructions taking either a register
1047 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1048 struct aarch64_register rd
,
1049 struct aarch64_register rn
,
1050 struct aarch64_operand operand
)
1052 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1053 /* The opcode is different for register and immediate source operands. */
1054 uint32_t operand_opcode
;
1056 if (operand
.type
== OPERAND_IMMEDIATE
)
1058 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1059 operand_opcode
= ENCODE (8, 4, 25);
1061 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1062 | ENCODE (operand
.imm
, 12, 10)
1063 | ENCODE (rn
.num
, 5, 5)
1064 | ENCODE (rd
.num
, 5, 0));
1068 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1069 operand_opcode
= ENCODE (5, 4, 25);
1071 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1076 /* Write an ADD instruction into *BUF.
1081 This function handles both an immediate and register add.
1083 RD is the destination register.
1084 RN is the input register.
1085 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1086 OPERAND_REGISTER. */
1089 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1090 struct aarch64_register rn
, struct aarch64_operand operand
)
1092 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1095 /* Write a SUB instruction into *BUF.
1100 This function handles both an immediate and register sub.
1102 RD is the destination register.
1103 RN is the input register.
1104 IMM is the immediate to substract to RN. */
1107 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1108 struct aarch64_register rn
, struct aarch64_operand operand
)
1110 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1113 /* Write a MOV instruction into *BUF.
1118 This function handles both a wide immediate move and a register move,
1119 with the condition that the source register is not xzr. xzr and the
1120 stack pointer share the same encoding and this function only supports
1123 RD is the destination register.
1124 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1125 OPERAND_REGISTER. */
1128 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1129 struct aarch64_operand operand
)
1131 if (operand
.type
== OPERAND_IMMEDIATE
)
1133 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1134 /* Do not shift the immediate. */
1135 uint32_t shift
= ENCODE (0, 2, 21);
1137 return aarch64_emit_insn (buf
, MOV
| size
| shift
1138 | ENCODE (operand
.imm
, 16, 5)
1139 | ENCODE (rd
.num
, 5, 0));
1142 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1145 /* Write a MOVK instruction into *BUF.
1147 MOVK rd, #imm, lsl #shift
1149 RD is the destination register.
1150 IMM is the immediate.
1151 SHIFT is the logical shift left to apply to IMM. */
1154 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1157 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1159 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1160 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1163 /* Write instructions into *BUF in order to move ADDR into a register.
1164 ADDR can be a 64-bit value.
1166 This function will emit a series of MOV and MOVK instructions, such as:
1169 MOVK xd, #(addr >> 16), lsl #16
1170 MOVK xd, #(addr >> 32), lsl #32
1171 MOVK xd, #(addr >> 48), lsl #48 */
1174 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1178 /* The MOV (wide immediate) instruction clears to top bits of the
1180 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1182 if ((addr
>> 16) != 0)
1183 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1187 if ((addr
>> 32) != 0)
1188 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1192 if ((addr
>> 48) != 0)
1193 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1198 /* Write a SUBS instruction into *BUF.
1202 This instruction update the condition flags.
1204 RD is the destination register.
1205 RN and RM are the source registers. */
1208 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1209 struct aarch64_register rn
, struct aarch64_operand operand
)
1211 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1214 /* Write a CMP instruction into *BUF.
1218 This instruction is an alias of SUBS xzr, rn, rm.
1220 RN and RM are the registers to compare. */
1223 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1224 struct aarch64_operand operand
)
1226 return emit_subs (buf
, xzr
, rn
, operand
);
1229 /* Write a AND instruction into *BUF.
1233 RD is the destination register.
1234 RN and RM are the source registers. */
1237 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1238 struct aarch64_register rn
, struct aarch64_register rm
)
1240 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1243 /* Write a ORR instruction into *BUF.
1247 RD is the destination register.
1248 RN and RM are the source registers. */
1251 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1252 struct aarch64_register rn
, struct aarch64_register rm
)
1254 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1257 /* Write a ORN instruction into *BUF.
1261 RD is the destination register.
1262 RN and RM are the source registers. */
1265 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1266 struct aarch64_register rn
, struct aarch64_register rm
)
1268 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1271 /* Write a EOR instruction into *BUF.
1275 RD is the destination register.
1276 RN and RM are the source registers. */
1279 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1280 struct aarch64_register rn
, struct aarch64_register rm
)
1282 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1285 /* Write a MVN instruction into *BUF.
1289 This is an alias for ORN rd, xzr, rm.
1291 RD is the destination register.
1292 RM is the source register. */
1295 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1296 struct aarch64_register rm
)
1298 return emit_orn (buf
, rd
, xzr
, rm
);
1301 /* Write a LSLV instruction into *BUF.
1305 RD is the destination register.
1306 RN and RM are the source registers. */
1309 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1310 struct aarch64_register rn
, struct aarch64_register rm
)
1312 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1315 /* Write a LSRV instruction into *BUF.
1319 RD is the destination register.
1320 RN and RM are the source registers. */
1323 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1324 struct aarch64_register rn
, struct aarch64_register rm
)
1326 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1329 /* Write a ASRV instruction into *BUF.
1333 RD is the destination register.
1334 RN and RM are the source registers. */
1337 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1338 struct aarch64_register rn
, struct aarch64_register rm
)
1340 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1343 /* Write a MUL instruction into *BUF.
1347 RD is the destination register.
1348 RN and RM are the source registers. */
1351 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1352 struct aarch64_register rn
, struct aarch64_register rm
)
1354 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1357 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1361 RT is the destination register.
1362 SYSTEM_REG is special purpose register to read. */
1365 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1366 enum aarch64_system_control_registers system_reg
)
1368 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1369 | ENCODE (rt
.num
, 5, 0));
1372 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1376 SYSTEM_REG is special purpose register to write.
1377 RT is the input register. */
1380 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1381 struct aarch64_register rt
)
1383 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1384 | ENCODE (rt
.num
, 5, 0));
1387 /* Write a SEVL instruction into *BUF.
1389 This is a hint instruction telling the hardware to trigger an event. */
1392 emit_sevl (uint32_t *buf
)
1394 return aarch64_emit_insn (buf
, SEVL
);
1397 /* Write a WFE instruction into *BUF.
1399 This is a hint instruction telling the hardware to wait for an event. */
1402 emit_wfe (uint32_t *buf
)
1404 return aarch64_emit_insn (buf
, WFE
);
1407 /* Write a SBFM instruction into *BUF.
1409 SBFM rd, rn, #immr, #imms
1411 This instruction moves the bits from #immr to #imms into the
1412 destination, sign extending the result.
1414 RD is the destination register.
1415 RN is the source register.
1416 IMMR is the bit number to start at (least significant bit).
1417 IMMS is the bit number to stop at (most significant bit). */
1420 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1421 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1423 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1424 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1426 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1427 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1428 | ENCODE (rd
.num
, 5, 0));
1431 /* Write a SBFX instruction into *BUF.
1433 SBFX rd, rn, #lsb, #width
1435 This instruction moves #width bits from #lsb into the destination, sign
1436 extending the result. This is an alias for:
1438 SBFM rd, rn, #lsb, #(lsb + width - 1)
1440 RD is the destination register.
1441 RN is the source register.
1442 LSB is the bit number to start at (least significant bit).
1443 WIDTH is the number of bits to move. */
1446 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1447 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1449 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1452 /* Write a UBFM instruction into *BUF.
1454 UBFM rd, rn, #immr, #imms
1456 This instruction moves the bits from #immr to #imms into the
1457 destination, extending the result with zeros.
1459 RD is the destination register.
1460 RN is the source register.
1461 IMMR is the bit number to start at (least significant bit).
1462 IMMS is the bit number to stop at (most significant bit). */
1465 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1466 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1468 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1469 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1471 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1472 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1473 | ENCODE (rd
.num
, 5, 0));
1476 /* Write a UBFX instruction into *BUF.
1478 UBFX rd, rn, #lsb, #width
1480 This instruction moves #width bits from #lsb into the destination,
1481 extending the result with zeros. This is an alias for:
1483 UBFM rd, rn, #lsb, #(lsb + width - 1)
1485 RD is the destination register.
1486 RN is the source register.
1487 LSB is the bit number to start at (least significant bit).
1488 WIDTH is the number of bits to move. */
1491 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1492 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1494 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1497 /* Write a CSINC instruction into *BUF.
1499 CSINC rd, rn, rm, cond
1501 This instruction conditionally increments rn or rm and places the result
1502 in rd. rn is chosen is the condition is true.
1504 RD is the destination register.
1505 RN and RM are the source registers.
1506 COND is the encoded condition. */
1509 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1510 struct aarch64_register rn
, struct aarch64_register rm
,
1513 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1515 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1516 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1517 | ENCODE (rd
.num
, 5, 0));
1520 /* Write a CSET instruction into *BUF.
1524 This instruction conditionally write 1 or 0 in the destination register.
1525 1 is written if the condition is true. This is an alias for:
1527 CSINC rd, xzr, xzr, !cond
1529 Note that the condition needs to be inverted.
1531 RD is the destination register.
1532 RN and RM are the source registers.
1533 COND is the encoded condition. */
1536 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1538 /* The least significant bit of the condition needs toggling in order to
1540 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1543 /* Write LEN instructions from BUF into the inferior memory at *TO.
1545 Note instructions are always little endian on AArch64, unlike data. */
1548 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1550 size_t byte_len
= len
* sizeof (uint32_t);
1551 #if (__BYTE_ORDER == __BIG_ENDIAN)
1552 uint32_t *le_buf
= xmalloc (byte_len
);
1555 for (i
= 0; i
< len
; i
++)
1556 le_buf
[i
] = htole32 (buf
[i
]);
1558 write_inferior_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1562 write_inferior_memory (*to
, (const unsigned char *) buf
, byte_len
);
1568 /* Sub-class of struct aarch64_insn_data, store information of
1569 instruction relocation for fast tracepoint. Visitor can
1570 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1571 the relocated instructions in buffer pointed by INSN_PTR. */
1573 struct aarch64_insn_relocation_data
1575 struct aarch64_insn_data base
;
1577 /* The new address the instruction is relocated to. */
1579 /* Pointer to the buffer of relocated instruction(s). */
1583 /* Implementation of aarch64_insn_visitor method "b". */
1586 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1587 struct aarch64_insn_data
*data
)
1589 struct aarch64_insn_relocation_data
*insn_reloc
1590 = (struct aarch64_insn_relocation_data
*) data
;
1592 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1594 if (can_encode_int32 (new_offset
, 28))
1595 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1598 /* Implementation of aarch64_insn_visitor method "b_cond". */
1601 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1602 struct aarch64_insn_data
*data
)
1604 struct aarch64_insn_relocation_data
*insn_reloc
1605 = (struct aarch64_insn_relocation_data
*) data
;
1607 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1609 if (can_encode_int32 (new_offset
, 21))
1611 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1614 else if (can_encode_int32 (new_offset
, 28))
1616 /* The offset is out of range for a conditional branch
1617 instruction but not for a unconditional branch. We can use
1618 the following instructions instead:
1620 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1621 B NOT_TAKEN ; Else jump over TAKEN and continue.
1628 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1629 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1630 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1634 /* Implementation of aarch64_insn_visitor method "cb". */
1637 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1638 const unsigned rn
, int is64
,
1639 struct aarch64_insn_data
*data
)
1641 struct aarch64_insn_relocation_data
*insn_reloc
1642 = (struct aarch64_insn_relocation_data
*) data
;
1644 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1646 if (can_encode_int32 (new_offset
, 21))
1648 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1649 aarch64_register (rn
, is64
), new_offset
);
1651 else if (can_encode_int32 (new_offset
, 28))
1653 /* The offset is out of range for a compare and branch
1654 instruction but not for a unconditional branch. We can use
1655 the following instructions instead:
1657 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1658 B NOT_TAKEN ; Else jump over TAKEN and continue.
1664 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1665 aarch64_register (rn
, is64
), 8);
1666 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1667 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1671 /* Implementation of aarch64_insn_visitor method "tb". */
1674 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1675 const unsigned rt
, unsigned bit
,
1676 struct aarch64_insn_data
*data
)
1678 struct aarch64_insn_relocation_data
*insn_reloc
1679 = (struct aarch64_insn_relocation_data
*) data
;
1681 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1683 if (can_encode_int32 (new_offset
, 16))
1685 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1686 aarch64_register (rt
, 1), new_offset
);
1688 else if (can_encode_int32 (new_offset
, 28))
1690 /* The offset is out of range for a test bit and branch
1691 instruction but not for a unconditional branch. We can use
1692 the following instructions instead:
1694 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1695 B NOT_TAKEN ; Else jump over TAKEN and continue.
1701 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1702 aarch64_register (rt
, 1), 8);
1703 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1704 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1709 /* Implementation of aarch64_insn_visitor method "adr". */
1712 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1714 struct aarch64_insn_data
*data
)
1716 struct aarch64_insn_relocation_data
*insn_reloc
1717 = (struct aarch64_insn_relocation_data
*) data
;
1718 /* We know exactly the address the ADR{P,} instruction will compute.
1719 We can just write it to the destination register. */
1720 CORE_ADDR address
= data
->insn_addr
+ offset
;
1724 /* Clear the lower 12 bits of the offset to get the 4K page. */
1725 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1726 aarch64_register (rd
, 1),
1730 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1731 aarch64_register (rd
, 1), address
);
1734 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1737 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1738 const unsigned rt
, const int is64
,
1739 struct aarch64_insn_data
*data
)
1741 struct aarch64_insn_relocation_data
*insn_reloc
1742 = (struct aarch64_insn_relocation_data
*) data
;
1743 CORE_ADDR address
= data
->insn_addr
+ offset
;
1745 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1746 aarch64_register (rt
, 1), address
);
1748 /* We know exactly what address to load from, and what register we
1751 MOV xd, #(oldloc + offset)
1752 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1755 LDR xd, [xd] ; or LDRSW xd, [xd]
1760 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1761 aarch64_register (rt
, 1),
1762 aarch64_register (rt
, 1),
1763 offset_memory_operand (0));
1765 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1766 aarch64_register (rt
, is64
),
1767 aarch64_register (rt
, 1),
1768 offset_memory_operand (0));
1771 /* Implementation of aarch64_insn_visitor method "others". */
1774 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1775 struct aarch64_insn_data
*data
)
1777 struct aarch64_insn_relocation_data
*insn_reloc
1778 = (struct aarch64_insn_relocation_data
*) data
;
1780 /* The instruction is not PC relative. Just re-emit it at the new
1782 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1785 static const struct aarch64_insn_visitor visitor
=
1787 aarch64_ftrace_insn_reloc_b
,
1788 aarch64_ftrace_insn_reloc_b_cond
,
1789 aarch64_ftrace_insn_reloc_cb
,
1790 aarch64_ftrace_insn_reloc_tb
,
1791 aarch64_ftrace_insn_reloc_adr
,
1792 aarch64_ftrace_insn_reloc_ldr_literal
,
1793 aarch64_ftrace_insn_reloc_others
,
1796 /* Implementation of linux_target_ops method
1797 "install_fast_tracepoint_jump_pad". */
1800 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1802 CORE_ADDR collector
,
1805 CORE_ADDR
*jump_entry
,
1806 CORE_ADDR
*trampoline
,
1807 ULONGEST
*trampoline_size
,
1808 unsigned char *jjump_pad_insn
,
1809 ULONGEST
*jjump_pad_insn_size
,
1810 CORE_ADDR
*adjusted_insn_addr
,
1811 CORE_ADDR
*adjusted_insn_addr_end
,
1819 CORE_ADDR buildaddr
= *jump_entry
;
1820 struct aarch64_insn_relocation_data insn_data
;
1822 /* We need to save the current state on the stack both to restore it
1823 later and to collect register values when the tracepoint is hit.
1825 The saved registers are pushed in a layout that needs to be in sync
1826 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1827 the supply_fast_tracepoint_registers function will fill in the
1828 register cache from a pointer to saved registers on the stack we build
1831 For simplicity, we set the size of each cell on the stack to 16 bytes.
1832 This way one cell can hold any register type, from system registers
1833 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1834 has to be 16 bytes aligned anyway.
1836 Note that the CPSR register does not exist on AArch64. Instead we
1837 can access system bits describing the process state with the
1838 MRS/MSR instructions, namely the condition flags. We save them as
1839 if they are part of a CPSR register because that's how GDB
1840 interprets these system bits. At the moment, only the condition
1841 flags are saved in CPSR (NZCV).
1843 Stack layout, each cell is 16 bytes (descending):
1845 High *-------- SIMD&FP registers from 31 down to 0. --------*
1851 *---- General purpose registers from 30 down to 0. ----*
1857 *------------- Special purpose registers. -------------*
1860 | CPSR (NZCV) | 5 cells
1863 *------------- collecting_t object --------------------*
1864 | TPIDR_EL0 | struct tracepoint * |
1865 Low *------------------------------------------------------*
1867 After this stack is set up, we issue a call to the collector, passing
1868 it the saved registers at (SP + 16). */
1870 /* Push SIMD&FP registers on the stack:
1872 SUB sp, sp, #(32 * 16)
1874 STP q30, q31, [sp, #(30 * 16)]
1879 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1880 for (i
= 30; i
>= 0; i
-= 2)
1881 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1883 /* Push general puspose registers on the stack. Note that we do not need
1884 to push x31 as it represents the xzr register and not the stack
1885 pointer in a STR instruction.
1887 SUB sp, sp, #(31 * 16)
1889 STR x30, [sp, #(30 * 16)]
1894 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1895 for (i
= 30; i
>= 0; i
-= 1)
1896 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
1897 offset_memory_operand (i
* 16));
1899 /* Make space for 5 more cells.
1901 SUB sp, sp, #(5 * 16)
1904 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
1909 ADD x4, sp, #((32 + 31 + 5) * 16)
1910 STR x4, [sp, #(4 * 16)]
1913 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
1914 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
1916 /* Save PC (tracepoint address):
1921 STR x3, [sp, #(3 * 16)]
1925 p
+= emit_mov_addr (p
, x3
, tpaddr
);
1926 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
1928 /* Save CPSR (NZCV), FPSR and FPCR:
1934 STR x2, [sp, #(2 * 16)]
1935 STR x1, [sp, #(1 * 16)]
1936 STR x0, [sp, #(0 * 16)]
1939 p
+= emit_mrs (p
, x2
, NZCV
);
1940 p
+= emit_mrs (p
, x1
, FPSR
);
1941 p
+= emit_mrs (p
, x0
, FPCR
);
1942 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
1943 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
1944 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
1946 /* Push the collecting_t object. It consist of the address of the
1947 tracepoint and an ID for the current thread. We get the latter by
1948 reading the tpidr_el0 system register. It corresponds to the
1949 NT_ARM_TLS register accessible with ptrace.
1956 STP x0, x1, [sp, #-16]!
1960 p
+= emit_mov_addr (p
, x0
, tpoint
);
1961 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
1962 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
1966 The shared memory for the lock is at lockaddr. It will hold zero
1967 if no-one is holding the lock, otherwise it contains the address of
1968 the collecting_t object on the stack of the thread which acquired it.
1970 At this stage, the stack pointer points to this thread's collecting_t
1973 We use the following registers:
1974 - x0: Address of the lock.
1975 - x1: Pointer to collecting_t object.
1976 - x2: Scratch register.
1982 ; Trigger an event local to this core. So the following WFE
1983 ; instruction is ignored.
1986 ; Wait for an event. The event is triggered by either the SEVL
1987 ; or STLR instructions (store release).
1990 ; Atomically read at lockaddr. This marks the memory location as
1991 ; exclusive. This instruction also has memory constraints which
1992 ; make sure all previous data reads and writes are done before
1996 ; Try again if another thread holds the lock.
1999 ; We can lock it! Write the address of the collecting_t object.
2000 ; This instruction will fail if the memory location is not marked
2001 ; as exclusive anymore. If it succeeds, it will remove the
2002 ; exclusive mark on the memory location. This way, if another
2003 ; thread executes this instruction before us, we will fail and try
2010 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2011 p
+= emit_mov (p
, x1
, register_operand (sp
));
2015 p
+= emit_ldaxr (p
, x2
, x0
);
2016 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2017 p
+= emit_stxr (p
, w2
, x1
, x0
);
2018 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2020 /* Call collector (struct tracepoint *, unsigned char *):
2025 ; Saved registers start after the collecting_t object.
2028 ; We use an intra-procedure-call scratch register.
2029 MOV ip0, #(collector)
2032 ; And call back to C!
2037 p
+= emit_mov_addr (p
, x0
, tpoint
);
2038 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2040 p
+= emit_mov_addr (p
, ip0
, collector
);
2041 p
+= emit_blr (p
, ip0
);
2043 /* Release the lock.
2048 ; This instruction is a normal store with memory ordering
2049 ; constraints. Thanks to this we do not have to put a data
2050 ; barrier instruction to make sure all data read and writes are done
2051 ; before this instruction is executed. Furthermore, this instrucion
2052 ; will trigger an event, letting other threads know they can grab
2057 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2058 p
+= emit_stlr (p
, xzr
, x0
);
2060 /* Free collecting_t object:
2065 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2067 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2068 registers from the stack.
2070 LDR x2, [sp, #(2 * 16)]
2071 LDR x1, [sp, #(1 * 16)]
2072 LDR x0, [sp, #(0 * 16)]
2078 ADD sp, sp #(5 * 16)
2081 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2082 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2083 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2084 p
+= emit_msr (p
, NZCV
, x2
);
2085 p
+= emit_msr (p
, FPSR
, x1
);
2086 p
+= emit_msr (p
, FPCR
, x0
);
2088 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2090 /* Pop general purpose registers:
2094 LDR x30, [sp, #(30 * 16)]
2096 ADD sp, sp, #(31 * 16)
2099 for (i
= 0; i
<= 30; i
+= 1)
2100 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2101 offset_memory_operand (i
* 16));
2102 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2104 /* Pop SIMD&FP registers:
2108 LDP q30, q31, [sp, #(30 * 16)]
2110 ADD sp, sp, #(32 * 16)
2113 for (i
= 0; i
<= 30; i
+= 2)
2114 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2115 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2117 /* Write the code into the inferior memory. */
2118 append_insns (&buildaddr
, p
- buf
, buf
);
2120 /* Now emit the relocated instruction. */
2121 *adjusted_insn_addr
= buildaddr
;
2122 target_read_uint32 (tpaddr
, &insn
);
2124 insn_data
.base
.insn_addr
= tpaddr
;
2125 insn_data
.new_addr
= buildaddr
;
2126 insn_data
.insn_ptr
= buf
;
2128 aarch64_relocate_instruction (insn
, &visitor
,
2129 (struct aarch64_insn_data
*) &insn_data
);
2131 /* We may not have been able to relocate the instruction. */
2132 if (insn_data
.insn_ptr
== buf
)
2135 "E.Could not relocate instruction from %s to %s.",
2136 core_addr_to_string_nz (tpaddr
),
2137 core_addr_to_string_nz (buildaddr
));
2141 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2142 *adjusted_insn_addr_end
= buildaddr
;
2144 /* Go back to the start of the buffer. */
2147 /* Emit a branch back from the jump pad. */
2148 offset
= (tpaddr
+ orig_size
- buildaddr
);
2149 if (!can_encode_int32 (offset
, 28))
2152 "E.Jump back from jump pad too far from tracepoint "
2153 "(offset 0x%" PRIx32
" cannot be encoded in 28 bits).",
2158 p
+= emit_b (p
, 0, offset
);
2159 append_insns (&buildaddr
, p
- buf
, buf
);
2161 /* Give the caller a branch instruction into the jump pad. */
2162 offset
= (*jump_entry
- tpaddr
);
2163 if (!can_encode_int32 (offset
, 28))
2166 "E.Jump pad too far from tracepoint "
2167 "(offset 0x%" PRIx32
" cannot be encoded in 28 bits).",
2172 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2173 *jjump_pad_insn_size
= 4;
2175 /* Return the end address of our pad. */
2176 *jump_entry
= buildaddr
;
2181 /* Helper function writing LEN instructions from START into
2182 current_insn_ptr. */
2185 emit_ops_insns (const uint32_t *start
, int len
)
2187 CORE_ADDR buildaddr
= current_insn_ptr
;
2190 debug_printf ("Adding %d instrucions at %s\n",
2191 len
, paddress (buildaddr
));
2193 append_insns (&buildaddr
, len
, start
);
2194 current_insn_ptr
= buildaddr
;
2197 /* Pop a register from the stack. */
2200 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2202 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2205 /* Push a register on the stack. */
2208 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2210 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2213 /* Implementation of emit_ops method "emit_prologue". */
2216 aarch64_emit_prologue (void)
2221 /* This function emit a prologue for the following function prototype:
2223 enum eval_result_type f (unsigned char *regs,
2226 The first argument is a buffer of raw registers. The second
2227 argument is the result of
2228 evaluating the expression, which will be set to whatever is on top of
2229 the stack at the end.
2231 The stack set up by the prologue is as such:
2233 High *------------------------------------------------------*
2236 | x1 (ULONGEST *value) |
2237 | x0 (unsigned char *regs) |
2238 Low *------------------------------------------------------*
2240 As we are implementing a stack machine, each opcode can expand the
2241 stack so we never know how far we are from the data saved by this
2242 prologue. In order to be able refer to value and regs later, we save
2243 the current stack pointer in the frame pointer. This way, it is not
2244 clobbered when calling C functions.
2246 Finally, throughtout every operation, we are using register x0 as the
2247 top of the stack, and x1 as a scratch register. */
2249 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2250 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2251 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2253 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2256 emit_ops_insns (buf
, p
- buf
);
2259 /* Implementation of emit_ops method "emit_epilogue". */
2262 aarch64_emit_epilogue (void)
2267 /* Store the result of the expression (x0) in *value. */
2268 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2269 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2270 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2272 /* Restore the previous state. */
2273 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2274 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2276 /* Return expr_eval_no_error. */
2277 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2278 p
+= emit_ret (p
, lr
);
2280 emit_ops_insns (buf
, p
- buf
);
2283 /* Implementation of emit_ops method "emit_add". */
2286 aarch64_emit_add (void)
2291 p
+= emit_pop (p
, x1
);
2292 p
+= emit_add (p
, x0
, x0
, register_operand (x1
));
2294 emit_ops_insns (buf
, p
- buf
);
2297 /* Implementation of emit_ops method "emit_sub". */
2300 aarch64_emit_sub (void)
2305 p
+= emit_pop (p
, x1
);
2306 p
+= emit_sub (p
, x0
, x0
, register_operand (x1
));
2308 emit_ops_insns (buf
, p
- buf
);
2311 /* Implementation of emit_ops method "emit_mul". */
2314 aarch64_emit_mul (void)
2319 p
+= emit_pop (p
, x1
);
2320 p
+= emit_mul (p
, x0
, x1
, x0
);
2322 emit_ops_insns (buf
, p
- buf
);
2325 /* Implementation of emit_ops method "emit_lsh". */
2328 aarch64_emit_lsh (void)
2333 p
+= emit_pop (p
, x1
);
2334 p
+= emit_lslv (p
, x0
, x1
, x0
);
2336 emit_ops_insns (buf
, p
- buf
);
2339 /* Implementation of emit_ops method "emit_rsh_signed". */
2342 aarch64_emit_rsh_signed (void)
2347 p
+= emit_pop (p
, x1
);
2348 p
+= emit_asrv (p
, x0
, x1
, x0
);
2350 emit_ops_insns (buf
, p
- buf
);
2353 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2356 aarch64_emit_rsh_unsigned (void)
2361 p
+= emit_pop (p
, x1
);
2362 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2364 emit_ops_insns (buf
, p
- buf
);
2367 /* Implementation of emit_ops method "emit_ext". */
2370 aarch64_emit_ext (int arg
)
2375 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2377 emit_ops_insns (buf
, p
- buf
);
2380 /* Implementation of emit_ops method "emit_log_not". */
2383 aarch64_emit_log_not (void)
2388 /* If the top of the stack is 0, replace it with 1. Else replace it with
2391 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2392 p
+= emit_cset (p
, x0
, EQ
);
2394 emit_ops_insns (buf
, p
- buf
);
2397 /* Implementation of emit_ops method "emit_bit_and". */
2400 aarch64_emit_bit_and (void)
2405 p
+= emit_pop (p
, x1
);
2406 p
+= emit_and (p
, x0
, x0
, x1
);
2408 emit_ops_insns (buf
, p
- buf
);
2411 /* Implementation of emit_ops method "emit_bit_or". */
2414 aarch64_emit_bit_or (void)
2419 p
+= emit_pop (p
, x1
);
2420 p
+= emit_orr (p
, x0
, x0
, x1
);
2422 emit_ops_insns (buf
, p
- buf
);
2425 /* Implementation of emit_ops method "emit_bit_xor". */
2428 aarch64_emit_bit_xor (void)
2433 p
+= emit_pop (p
, x1
);
2434 p
+= emit_eor (p
, x0
, x0
, x1
);
2436 emit_ops_insns (buf
, p
- buf
);
2439 /* Implementation of emit_ops method "emit_bit_not". */
2442 aarch64_emit_bit_not (void)
2447 p
+= emit_mvn (p
, x0
, x0
);
2449 emit_ops_insns (buf
, p
- buf
);
2452 /* Implementation of emit_ops method "emit_equal". */
2455 aarch64_emit_equal (void)
2460 p
+= emit_pop (p
, x1
);
2461 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2462 p
+= emit_cset (p
, x0
, EQ
);
2464 emit_ops_insns (buf
, p
- buf
);
2467 /* Implementation of emit_ops method "emit_less_signed". */
2470 aarch64_emit_less_signed (void)
2475 p
+= emit_pop (p
, x1
);
2476 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2477 p
+= emit_cset (p
, x0
, LT
);
2479 emit_ops_insns (buf
, p
- buf
);
2482 /* Implementation of emit_ops method "emit_less_unsigned". */
2485 aarch64_emit_less_unsigned (void)
2490 p
+= emit_pop (p
, x1
);
2491 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2492 p
+= emit_cset (p
, x0
, LO
);
2494 emit_ops_insns (buf
, p
- buf
);
2497 /* Implementation of emit_ops method "emit_ref". */
2500 aarch64_emit_ref (int size
)
2508 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2511 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2514 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2517 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2520 /* Unknown size, bail on compilation. */
2525 emit_ops_insns (buf
, p
- buf
);
2528 /* Implementation of emit_ops method "emit_if_goto". */
2531 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2536 /* The Z flag is set or cleared here. */
2537 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2538 /* This instruction must not change the Z flag. */
2539 p
+= emit_pop (p
, x0
);
2540 /* Branch over the next instruction if x0 == 0. */
2541 p
+= emit_bcond (p
, EQ
, 8);
2543 /* The NOP instruction will be patched with an unconditional branch. */
2545 *offset_p
= (p
- buf
) * 4;
2550 emit_ops_insns (buf
, p
- buf
);
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
2571 /* Implementation of emit_ops method "write_goto_address". */
2574 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2578 emit_b (&insn
, 0, to
- from
);
2579 append_insns (&from
, 1, &insn
);
2582 /* Implementation of emit_ops method "emit_const". */
2585 aarch64_emit_const (LONGEST num
)
2590 p
+= emit_mov_addr (p
, x0
, num
);
2592 emit_ops_insns (buf
, p
- buf
);
2595 /* Implementation of emit_ops method "emit_call". */
2598 aarch64_emit_call (CORE_ADDR fn
)
2603 p
+= emit_mov_addr (p
, ip0
, fn
);
2604 p
+= emit_blr (p
, ip0
);
2606 emit_ops_insns (buf
, p
- buf
);
2609 /* Implementation of emit_ops method "emit_reg". */
2612 aarch64_emit_reg (int reg
)
2617 /* Set x0 to unsigned char *regs. */
2618 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2619 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2620 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2622 emit_ops_insns (buf
, p
- buf
);
2624 aarch64_emit_call (get_raw_reg_func_addr ());
2627 /* Implementation of emit_ops method "emit_pop". */
2630 aarch64_emit_pop (void)
2635 p
+= emit_pop (p
, x0
);
2637 emit_ops_insns (buf
, p
- buf
);
2640 /* Implementation of emit_ops method "emit_stack_flush". */
2643 aarch64_emit_stack_flush (void)
2648 p
+= emit_push (p
, x0
);
2650 emit_ops_insns (buf
, p
- buf
);
2653 /* Implementation of emit_ops method "emit_zero_ext". */
2656 aarch64_emit_zero_ext (int arg
)
2661 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2663 emit_ops_insns (buf
, p
- buf
);
2666 /* Implementation of emit_ops method "emit_swap". */
2669 aarch64_emit_swap (void)
2674 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2675 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2676 p
+= emit_mov (p
, x0
, register_operand (x1
));
2678 emit_ops_insns (buf
, p
- buf
);
2681 /* Implementation of emit_ops method "emit_stack_adjust". */
2684 aarch64_emit_stack_adjust (int n
)
2686 /* This is not needed with our design. */
2690 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2692 emit_ops_insns (buf
, p
- buf
);
2695 /* Implementation of emit_ops method "emit_int_call_1". */
2698 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2703 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2705 emit_ops_insns (buf
, p
- buf
);
2707 aarch64_emit_call (fn
);
2710 /* Implementation of emit_ops method "emit_void_call_2". */
2713 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2718 /* Push x0 on the stack. */
2719 aarch64_emit_stack_flush ();
2721 /* Setup arguments for the function call:
2724 x1: top of the stack
2729 p
+= emit_mov (p
, x1
, register_operand (x0
));
2730 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2732 emit_ops_insns (buf
, p
- buf
);
2734 aarch64_emit_call (fn
);
2737 aarch64_emit_pop ();
2740 /* Implementation of emit_ops method "emit_eq_goto". */
2743 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2748 p
+= emit_pop (p
, x1
);
2749 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2750 /* Branch over the next instruction if x0 != x1. */
2751 p
+= emit_bcond (p
, NE
, 8);
2752 /* The NOP instruction will be patched with an unconditional branch. */
2754 *offset_p
= (p
- buf
) * 4;
2759 emit_ops_insns (buf
, p
- buf
);
2762 /* Implementation of emit_ops method "emit_ne_goto". */
2765 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2770 p
+= emit_pop (p
, x1
);
2771 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2772 /* Branch over the next instruction if x0 == x1. */
2773 p
+= emit_bcond (p
, EQ
, 8);
2774 /* The NOP instruction will be patched with an unconditional branch. */
2776 *offset_p
= (p
- buf
) * 4;
2781 emit_ops_insns (buf
, p
- buf
);
2784 /* Implementation of emit_ops method "emit_lt_goto". */
2787 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2792 p
+= emit_pop (p
, x1
);
2793 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2794 /* Branch over the next instruction if x0 >= x1. */
2795 p
+= emit_bcond (p
, GE
, 8);
2796 /* The NOP instruction will be patched with an unconditional branch. */
2798 *offset_p
= (p
- buf
) * 4;
2803 emit_ops_insns (buf
, p
- buf
);
2806 /* Implementation of emit_ops method "emit_le_goto". */
2809 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2814 p
+= emit_pop (p
, x1
);
2815 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2816 /* Branch over the next instruction if x0 > x1. */
2817 p
+= emit_bcond (p
, GT
, 8);
2818 /* The NOP instruction will be patched with an unconditional branch. */
2820 *offset_p
= (p
- buf
) * 4;
2825 emit_ops_insns (buf
, p
- buf
);
2828 /* Implementation of emit_ops method "emit_gt_goto". */
2831 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2836 p
+= emit_pop (p
, x1
);
2837 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2838 /* Branch over the next instruction if x0 <= x1. */
2839 p
+= emit_bcond (p
, LE
, 8);
2840 /* The NOP instruction will be patched with an unconditional branch. */
2842 *offset_p
= (p
- buf
) * 4;
2847 emit_ops_insns (buf
, p
- buf
);
2850 /* Implementation of emit_ops method "emit_ge_got". */
2853 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2858 p
+= emit_pop (p
, x1
);
2859 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2860 /* Branch over the next instruction if x0 <= x1. */
2861 p
+= emit_bcond (p
, LT
, 8);
2862 /* The NOP instruction will be patched with an unconditional branch. */
2864 *offset_p
= (p
- buf
) * 4;
2869 emit_ops_insns (buf
, p
- buf
);
2872 static struct emit_ops aarch64_emit_ops_impl
=
2874 aarch64_emit_prologue
,
2875 aarch64_emit_epilogue
,
2880 aarch64_emit_rsh_signed
,
2881 aarch64_emit_rsh_unsigned
,
2883 aarch64_emit_log_not
,
2884 aarch64_emit_bit_and
,
2885 aarch64_emit_bit_or
,
2886 aarch64_emit_bit_xor
,
2887 aarch64_emit_bit_not
,
2889 aarch64_emit_less_signed
,
2890 aarch64_emit_less_unsigned
,
2892 aarch64_emit_if_goto
,
2894 aarch64_write_goto_address
,
2899 aarch64_emit_stack_flush
,
2900 aarch64_emit_zero_ext
,
2902 aarch64_emit_stack_adjust
,
2903 aarch64_emit_int_call_1
,
2904 aarch64_emit_void_call_2
,
2905 aarch64_emit_eq_goto
,
2906 aarch64_emit_ne_goto
,
2907 aarch64_emit_lt_goto
,
2908 aarch64_emit_le_goto
,
2909 aarch64_emit_gt_goto
,
2910 aarch64_emit_ge_got
,
2913 /* Implementation of linux_target_ops method "emit_ops". */
2915 static struct emit_ops
*
2916 aarch64_emit_ops (void)
2918 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* Every A64 instruction is 4 bytes, so a fast tracepoint jump can
     replace any single instruction.  NOTE(review): the return value
     was elided in the damaged original and restored from the fixed
     A64 instruction size -- verify against upstream.  */
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  /* NOTE(review): the return value was elided in the damaged
     original; restored as "supported" (non-zero) -- verify against
     upstream.  */
  return 1;
}
2938 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2940 static const gdb_byte
*
2941 aarch64_sw_breakpoint_from_kind (int kind
, int *size
)
2943 *size
= aarch64_breakpoint_len
;
2944 return aarch64_breakpoint
;
2947 struct linux_target_ops the_low_target
=
2951 aarch64_cannot_fetch_register
,
2952 aarch64_cannot_store_register
,
2953 NULL
, /* fetch_register */
2956 NULL
, /* breakpoint_kind_from_pc */
2957 aarch64_sw_breakpoint_from_kind
,
2958 NULL
, /* breakpoint_reinsert_addr */
2959 0, /* decr_pc_after_break */
2960 aarch64_breakpoint_at
,
2961 aarch64_supports_z_point_type
,
2962 aarch64_insert_point
,
2963 aarch64_remove_point
,
2964 aarch64_stopped_by_watchpoint
,
2965 aarch64_stopped_data_address
,
2966 NULL
, /* collect_ptrace_register */
2967 NULL
, /* supply_ptrace_register */
2968 aarch64_linux_siginfo_fixup
,
2969 aarch64_linux_new_process
,
2970 aarch64_linux_new_thread
,
2971 aarch64_linux_new_fork
,
2972 aarch64_linux_prepare_to_resume
,
2973 NULL
, /* process_qsupported */
2974 aarch64_supports_tracepoints
,
2975 aarch64_get_thread_area
,
2976 aarch64_install_fast_tracepoint_jump_pad
,
2978 aarch64_get_min_fast_tracepoint_insn_len
,
2979 aarch64_supports_range_stepping
,
2983 initialize_low_arch (void)
2985 init_registers_aarch64 ();
2987 initialize_low_arch_aarch32 ();
2989 initialize_regsets_info (&aarch64_regsets_info
);