AArch64: Add target description/feature for MTE registers
gdbserver/linux-aarch64-low.cc

/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "arch/aarch64-mte-linux.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                          int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
                          "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
                          "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     This data is per-process rather than per-thread because gdbserver
     lacks the information to do otherwise: it is not told whether a
     requested hardware breakpoint/watchpoint is thread specific, so it
     has to set each hw bp/wp for every thread in the current process.
     The higher level bp/wp management in GDB will resume a thread if a
     hw bp/wp trap is not expected for it.  Since the hw bp/wp settings
     are the same for every thread, it is reasonable for the data to
     live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the SVE registers, i.e. the
   target description has the SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

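/* As a decoding note: these little-endian bytes form the 32-bit word
   0xd4200000, which is BRK #0 -- the BRK instruction with a zero
   16-bit immediate.  */
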
/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

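/* These constants correspond to the remote protocol Z/z packet types:
   Z0 (software breakpoint), Z1 (hardware breakpoint), Z2 (write
   watchpoint), Z3 (read watchpoint) and Z4 (access watchpoint), all of
   which this target supports.  */
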
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Return the address only having significant bits.  This is used to ignore
   the top byte (TBI).  */

static CORE_ADDR
address_significant (CORE_ADDR addr)
{
  /* Clear the insignificant bits of the target address and sign extend
     the resulting address.  */
  int addr_bit = 56;

  CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);
  addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
  addr = (addr ^ sign) - sign;

  return addr;
}

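/* A worked example of the masking above: with a hypothetical tag of
   0x2a in the top byte, address_significant (0x2a00ffffdeadbeef) drops
   the tag via the 56-bit mask, giving 0x0000ffffdeadbeef; since bit 55
   of that value is clear, the sign extension leaves it unchanged.  A
   kernel-space address such as 0x2affffffc0001000 would instead
   sign-extend to 0xffffffffc0001000.  */
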
/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Make sure to ignore the top byte, otherwise we may not recognize a
     hardware watchpoint hit.  The stopped data addresses coming from the
     kernel can potentially be tagged addresses.  */
  const CORE_ADDR addr_trap
    = address_significant ((CORE_ADDR) siginfo.si_addr);

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                           |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
                              process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      unsigned long hwcap2 = linux_get_hwcap2 (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
      /* MTE is AArch64-only.  */
      bool mte_p = hwcap2 & HWCAP2_MTE;

      current_process ()->tdesc
        = aarch64_linux_read_description (vq, pauth_p, mte_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

801
9eedd27d
TBA
802bool
803aarch64_target::low_supports_catch_syscall ()
804{
805 return true;
806}
061fc021 807
9eedd27d
TBA
808/* Implementation of linux target ops method "low_get_syscall_trapinfo". */
809
810void
811aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
061fc021
YQ
812{
813 int use_64bit = register_size (regcache->tdesc, 0) == 8;
814
815 if (use_64bit)
816 {
817 long l_sysno;
818
819 collect_register_by_name (regcache, "x8", &l_sysno);
820 *sysno = (int) l_sysno;
821 }
822 else
823 collect_register_by_name (regcache, "r7", sysno);
824}
825
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

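/* The values above are the standard AArch64 condition-code encodings:
   EQ (equal), NE (not equal), LO (unsigned lower, i.e. carry clear),
   and GE/LT/GT/LE (signed comparisons).  Only the codes used by the
   code generators below are listed.  */
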
enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

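/* To unpack one of these encodings: TPIDR_EL0 above sets o0 = 1 (the
   architectural op0 is o0 + 2 = 3), op1 = 3, CRn = 13, CRm = 0 and
   op2 = 2, which is the standard S3_3_C13_C0_2 encoding of TPIDR_EL0.
   The packed 15-bit value is inserted verbatim into the system
   register field of MRS/MSR by emit_mrs and emit_msr below.  */
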
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

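/* A worked example of the encoding above: for a 64-bit pre-index pair
   such as "stp x29, x30, [sp, #-16]!", OPERAND.index is -16; shifting
   it right by 3 gives -2, which is stored in the 7-bit signed
   immediate field at bit 15.  This is why callers must keep pair
   offsets 8-byte aligned and within the -512 .. 504 range.  */
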
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

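/* These exclusive-access emitters are typically combined into a
   load-exclusive/store-exclusive retry loop.  A minimal illustrative
   sketch of such a loop (not a sequence taken from a caller in this
   file):

     again:
       LDAXR x0, [x1]      ; load-acquire exclusive from [x1]
       STXR  w2, x3, [x1]  ; try to store x3; w2 = 0 on success
       CBNZ  w2, again     ; retry if another CPU broke the exclusivity  */
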
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

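/* For example, emit_mov_addr (p, x0, 0x123456789abc) produces three
   instructions and returns 3:

     MOV  x0, #0x9abc
     MOVK x0, #0x5678, lsl #16
     MOVK x0, #0x1234, lsl #32

   The final MOVK is skipped because bits 48-63 of that address are
   zero.  */
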
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

1522
1523/* Write a LSLV instruction into *BUF.
1524
1525 LSLV rd, rn, rm
1526
1527 RD is the destination register.
1528 RN and RM are the source registers. */
1529
1530static int
1531emit_lslv (uint32_t *buf, struct aarch64_register rd,
1532 struct aarch64_register rn, struct aarch64_register rm)
1533{
1534 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1535}
1536
1537/* Write a LSRV instruction into *BUF.
1538
1539 LSRV rd, rn, rm
1540
1541 RD is the destination register.
1542 RN and RM are the source registers. */
1543
1544static int
1545emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1546 struct aarch64_register rn, struct aarch64_register rm)
1547{
1548 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1549}
1550
1551/* Write a ASRV instruction into *BUF.
1552
1553 ASRV rd, rn, rm
1554
1555 RD is the destination register.
1556 RN and RM are the source registers. */
1557
1558static int
1559emit_asrv (uint32_t *buf, struct aarch64_register rd,
1560 struct aarch64_register rn, struct aarch64_register rm)
1561{
1562 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1563}
1564
1565/* Write a MUL instruction into *BUF.
1566
1567 MUL rd, rn, rm
1568
1569 RD is the destination register.
1570 RN and RM are the source registers. */
1571
1572static int
1573emit_mul (uint32_t *buf, struct aarch64_register rd,
1574 struct aarch64_register rn, struct aarch64_register rm)
1575{
1576 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1577}
1578
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

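/* For instance, emit_ubfx (p, x0, x1, 8, 4) assembles
   "ubfx x0, x1, #8, #4" (i.e. ubfm x0, x1, #8, #11), which extracts
   bits 8..11 of x1 into the low bits of x0 and zeroes the rest.  */
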
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction places rn in rd if the condition is true, and
   rm + 1 in rd otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register: 1 if the condition is true, 0 otherwise.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

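/* As a concrete case, emit_cset (p, x0, EQ) toggles EQ (0x0) to NE
   (0x1) and assembles "csinc x0, xzr, xzr, ne": x0 becomes
   xzr + 1 = 1 when NE is false (i.e. when EQ holds), and xzr = 0
   otherwise -- exactly "cset x0, eq".  */
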
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

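/* A minimal sketch of how the emit_* helpers and append_insns combine
   (illustrative only; ADDR and TO stand for caller-provided values):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);   // materialize ADDR in x0
     p += emit_ret (p, lr);              // return to the caller
     append_insns (&to, p - buf, buf);   // copy to inferior, advance TO  */
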
/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

1820/* Implementation of aarch64_insn_visitor method "b_cond". */
1821
1822static void
1823aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1824 struct aarch64_insn_data *data)
1825{
1826 struct aarch64_insn_relocation_data *insn_reloc
1827 = (struct aarch64_insn_relocation_data *) data;
 1828 int64_t new_offset
1829 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1830
1831 if (can_encode_int32 (new_offset, 21))
1832 {
1833 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1834 new_offset);
 1835 }
 1836 else if (can_encode_int32 (new_offset, 28))
 1837 {
 1838 /* The offset is out of range for a conditional branch
 1839 instruction but not for an unconditional branch. We can use
 1840 the following instructions instead:
 1841
1842 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1843 B NOT_TAKEN ; Else jump over TAKEN and continue.
1844 TAKEN:
1845 B #(offset - 8)
1846 NOT_TAKEN:
1847
1848 */
1849
1850 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1851 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1852 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
 1853 }
 1854}
 1855
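/* A worked version of the range checks above (helper name hypothetical;
   this assumes can_encode_int32 tests whether a signed value fits in the
   given number of bits).  B.COND carries a 19-bit word immediate, so the
   byte offset must fit in 21 bits (+/-1 MiB); B carries a 26-bit word
   immediate, so the byte offset must fit in 28 bits (+/-128 MiB).  */

static int
fits_signed_bits_example (int64_t val, unsigned int bits)
{
  int64_t limit = (int64_t) 1 << (bits - 1);

  return val >= -limit && val < limit;
}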
1856/* Implementation of aarch64_insn_visitor method "cb". */
1857
1858static void
1859aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1860 const unsigned rn, int is64,
1861 struct aarch64_insn_data *data)
1862{
1863 struct aarch64_insn_relocation_data *insn_reloc
1864 = (struct aarch64_insn_relocation_data *) data;
 1865 int64_t new_offset
1866 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1867
1868 if (can_encode_int32 (new_offset, 21))
1869 {
1870 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1871 aarch64_register (rn, is64), new_offset);
 1872 }
 1873 else if (can_encode_int32 (new_offset, 28))
 1874 {
 1875 /* The offset is out of range for a compare and branch
 1876 instruction but not for an unconditional branch. We can use
 1877 the following instructions instead:
1878
1879 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1880 B NOT_TAKEN ; Else jump over TAKEN and continue.
1881 TAKEN:
1882 B #(offset - 8)
1883 NOT_TAKEN:
1884
1885 */
1886 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1887 aarch64_register (rn, is64), 8);
1888 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1889 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1890 }
1891}
 1892
1893/* Implementation of aarch64_insn_visitor method "tb". */
 1894
1895static void
1896aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1897 const unsigned rt, unsigned bit,
1898 struct aarch64_insn_data *data)
1899{
1900 struct aarch64_insn_relocation_data *insn_reloc
1901 = (struct aarch64_insn_relocation_data *) data;
 1902 int64_t new_offset
1903 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1904
1905 if (can_encode_int32 (new_offset, 16))
1906 {
1907 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1908 aarch64_register (rt, 1), new_offset);
 1909 }
 1910 else if (can_encode_int32 (new_offset, 28))
 1911 {
 1912 /* The offset is out of range for a test bit and branch
 1913 instruction but not for an unconditional branch. We can use
 1914 the following instructions instead:
1915
1916 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1917 B NOT_TAKEN ; Else jump over TAKEN and continue.
1918 TAKEN:
1919 B #(offset - 8)
1920 NOT_TAKEN:
1921
1922 */
1923 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1924 aarch64_register (rt, 1), 8);
1925 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1926 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1927 new_offset - 8);
1928 }
1929}
 1930
1931/* Implementation of aarch64_insn_visitor method "adr". */
 1932
1933static void
1934aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1935 const int is_adrp,
1936 struct aarch64_insn_data *data)
1937{
1938 struct aarch64_insn_relocation_data *insn_reloc
1939 = (struct aarch64_insn_relocation_data *) data;
1940 /* We know exactly the address the ADR{P,} instruction will compute.
1941 We can just write it to the destination register. */
1942 CORE_ADDR address = data->insn_addr + offset;
 1943
1944 if (is_adrp)
1945 {
1946 /* Clear the lower 12 bits of the offset to get the 4K page. */
1947 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1948 aarch64_register (rd, 1),
1949 address & ~0xfff);
1950 }
1951 else
1952 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1953 aarch64_register (rd, 1), address);
1954}
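/* A worked example of the page arithmetic above (helper name
   hypothetical): if an ADRP's operand resolves to 0x412345, the
   instruction materializes the 4K page base 0x412345 & ~0xfff ==
   0x412000, leaving the low 12 bits to a following ADD or memory
   operand.  */

static CORE_ADDR
adrp_page_base_example (CORE_ADDR address)
{
  return address & ~(CORE_ADDR) 0xfff;
}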
 1955
1956/* Implementation of aarch64_insn_visitor method "ldr_literal". */
 1957
1958static void
1959aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1960 const unsigned rt, const int is64,
1961 struct aarch64_insn_data *data)
1962{
1963 struct aarch64_insn_relocation_data *insn_reloc
1964 = (struct aarch64_insn_relocation_data *) data;
1965 CORE_ADDR address = data->insn_addr + offset;
1966
1967 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1968 aarch64_register (rt, 1), address);
1969
1970 /* We know exactly what address to load from, and what register we
1971 can use:
1972
1973 MOV xd, #(oldloc + offset)
1974 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1975 ...
1976
1977 LDR xd, [xd] ; or LDRSW xd, [xd]
1978
1979 */
1980
1981 if (is_sw)
1982 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1983 aarch64_register (rt, 1),
1984 aarch64_register (rt, 1),
1985 offset_memory_operand (0));
 1986 else
1987 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1988 aarch64_register (rt, is64),
1989 aarch64_register (rt, 1),
1990 offset_memory_operand (0));
1991}
1992
1993/* Implementation of aarch64_insn_visitor method "others". */
1994
1995static void
1996aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1997 struct aarch64_insn_data *data)
1998{
1999 struct aarch64_insn_relocation_data *insn_reloc
2000 = (struct aarch64_insn_relocation_data *) data;
 2001
2002 /* The instruction is not PC relative. Just re-emit it at the new
2003 location. */
 2004 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
2005}
2006
2007static const struct aarch64_insn_visitor visitor =
2008{
2009 aarch64_ftrace_insn_reloc_b,
2010 aarch64_ftrace_insn_reloc_b_cond,
2011 aarch64_ftrace_insn_reloc_cb,
2012 aarch64_ftrace_insn_reloc_tb,
2013 aarch64_ftrace_insn_reloc_adr,
2014 aarch64_ftrace_insn_reloc_ldr_literal,
2015 aarch64_ftrace_insn_reloc_others,
2016};
2017
2018bool
2019aarch64_target::supports_fast_tracepoints ()
2020{
2021 return true;
2022}
2023
2024/* Implementation of target ops method
2025 "install_fast_tracepoint_jump_pad". */
2026
2027int
2028aarch64_target::install_fast_tracepoint_jump_pad
2029 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
2030 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
2031 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
2032 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
2033 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
2034 char *err)
2035{
2036 uint32_t buf[256];
2037 uint32_t *p = buf;
 2038 int64_t offset;
 2039 int i;
 2040 uint32_t insn;
 2041 CORE_ADDR buildaddr = *jump_entry;
 2042 struct aarch64_insn_relocation_data insn_data;
2043
2044 /* We need to save the current state on the stack both to restore it
2045 later and to collect register values when the tracepoint is hit.
2046
2047 The saved registers are pushed in a layout that needs to be in sync
2048 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2049 the supply_fast_tracepoint_registers function will fill in the
2050 register cache from a pointer to saved registers on the stack we build
2051 here.
2052
2053 For simplicity, we set the size of each cell on the stack to 16 bytes.
2054 This way one cell can hold any register type, from system registers
2055 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
2056 has to be 16 bytes aligned anyway.
2057
2058 Note that the CPSR register does not exist on AArch64. Instead we
2059 can access system bits describing the process state with the
2060 MRS/MSR instructions, namely the condition flags. We save them as
2061 if they are part of a CPSR register because that's how GDB
2062 interprets these system bits. At the moment, only the condition
2063 flags are saved in CPSR (NZCV).
2064
2065 Stack layout, each cell is 16 bytes (descending):
2066
2067 High *-------- SIMD&FP registers from 31 down to 0. --------*
2068 | q31 |
2069 . .
2070 . . 32 cells
2071 . .
2072 | q0 |
2073 *---- General purpose registers from 30 down to 0. ----*
2074 | x30 |
2075 . .
2076 . . 31 cells
2077 . .
2078 | x0 |
2079 *------------- Special purpose registers. -------------*
2080 | SP |
2081 | PC |
2082 | CPSR (NZCV) | 5 cells
2083 | FPSR |
2084 | FPCR | <- SP + 16
2085 *------------- collecting_t object --------------------*
2086 | TPIDR_EL0 | struct tracepoint * |
2087 Low *------------------------------------------------------*
2088
2089 After this stack is set up, we issue a call to the collector, passing
2090 it the saved registers at (SP + 16). */
2091
2092 /* Push SIMD&FP registers on the stack:
2093
2094 SUB sp, sp, #(32 * 16)
2095
2096 STP q30, q31, [sp, #(30 * 16)]
2097 ...
2098 STP q0, q1, [sp]
2099
2100 */
2101 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2102 for (i = 30; i >= 0; i -= 2)
2103 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2104
 2105 /* Push general purpose registers on the stack. Note that we do not need
2106 to push x31 as it represents the xzr register and not the stack
2107 pointer in a STR instruction.
2108
2109 SUB sp, sp, #(31 * 16)
2110
2111 STR x30, [sp, #(30 * 16)]
2112 ...
2113 STR x0, [sp]
2114
2115 */
2116 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2117 for (i = 30; i >= 0; i -= 1)
2118 p += emit_str (p, aarch64_register (i, 1), sp,
2119 offset_memory_operand (i * 16));
2120
2121 /* Make space for 5 more cells.
2122
2123 SUB sp, sp, #(5 * 16)
2124
2125 */
2126 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2127
2128
2129 /* Save SP:
2130
2131 ADD x4, sp, #((32 + 31 + 5) * 16)
2132 STR x4, [sp, #(4 * 16)]
2133
2134 */
2135 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2136 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2137
2138 /* Save PC (tracepoint address):
2139
2140 MOV x3, #(tpaddr)
2141 ...
2142
2143 STR x3, [sp, #(3 * 16)]
2144
2145 */
2146
2147 p += emit_mov_addr (p, x3, tpaddr);
2148 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2149
2150 /* Save CPSR (NZCV), FPSR and FPCR:
2151
2152 MRS x2, nzcv
2153 MRS x1, fpsr
2154 MRS x0, fpcr
2155
2156 STR x2, [sp, #(2 * 16)]
2157 STR x1, [sp, #(1 * 16)]
2158 STR x0, [sp, #(0 * 16)]
2159
2160 */
2161 p += emit_mrs (p, x2, NZCV);
2162 p += emit_mrs (p, x1, FPSR);
2163 p += emit_mrs (p, x0, FPCR);
2164 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2165 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2166 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2167
 2168 /* Push the collecting_t object. It consists of the address of the
2169 tracepoint and an ID for the current thread. We get the latter by
2170 reading the tpidr_el0 system register. It corresponds to the
2171 NT_ARM_TLS register accessible with ptrace.
2172
2173 MOV x0, #(tpoint)
2174 ...
2175
2176 MRS x1, tpidr_el0
2177
2178 STP x0, x1, [sp, #-16]!
2179
2180 */
2181
2182 p += emit_mov_addr (p, x0, tpoint);
2183 p += emit_mrs (p, x1, TPIDR_EL0);
2184 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2185
2186 /* Spin-lock:
2187
2188 The shared memory for the lock is at lockaddr. It will hold zero
2189 if no-one is holding the lock, otherwise it contains the address of
2190 the collecting_t object on the stack of the thread which acquired it.
2191
2192 At this stage, the stack pointer points to this thread's collecting_t
2193 object.
2194
2195 We use the following registers:
2196 - x0: Address of the lock.
2197 - x1: Pointer to collecting_t object.
2198 - x2: Scratch register.
2199
2200 MOV x0, #(lockaddr)
2201 ...
2202 MOV x1, sp
2203
2204 ; Trigger an event local to this core. So the following WFE
2205 ; instruction is ignored.
2206 SEVL
2207 again:
2208 ; Wait for an event. The event is triggered by either the SEVL
2209 ; or STLR instructions (store release).
2210 WFE
2211
2212 ; Atomically read at lockaddr. This marks the memory location as
2213 ; exclusive. This instruction also has memory constraints which
2214 ; make sure all previous data reads and writes are done before
2215 ; executing it.
2216 LDAXR x2, [x0]
2217
2218 ; Try again if another thread holds the lock.
2219 CBNZ x2, again
2220
2221 ; We can lock it! Write the address of the collecting_t object.
2222 ; This instruction will fail if the memory location is not marked
2223 ; as exclusive anymore. If it succeeds, it will remove the
2224 ; exclusive mark on the memory location. This way, if another
2225 ; thread executes this instruction before us, we will fail and try
2226 ; all over again.
2227 STXR w2, x1, [x0]
2228 CBNZ w2, again
2229
2230 */
2231
2232 p += emit_mov_addr (p, x0, lockaddr);
2233 p += emit_mov (p, x1, register_operand (sp));
2234
2235 p += emit_sevl (p);
2236 p += emit_wfe (p);
2237 p += emit_ldaxr (p, x2, x0);
2238 p += emit_cb (p, 1, w2, -2 * 4);
2239 p += emit_stxr (p, w2, x1, x0);
2240 p += emit_cb (p, 1, x2, -4 * 4);
2241
2242 /* Call collector (struct tracepoint *, unsigned char *):
2243
2244 MOV x0, #(tpoint)
2245 ...
2246
2247 ; Saved registers start after the collecting_t object.
2248 ADD x1, sp, #16
2249
2250 ; We use an intra-procedure-call scratch register.
2251 MOV ip0, #(collector)
2252 ...
2253
2254 ; And call back to C!
2255 BLR ip0
2256
2257 */
2258
2259 p += emit_mov_addr (p, x0, tpoint);
2260 p += emit_add (p, x1, sp, immediate_operand (16));
2261
2262 p += emit_mov_addr (p, ip0, collector);
2263 p += emit_blr (p, ip0);
2264
2265 /* Release the lock.
2266
2267 MOV x0, #(lockaddr)
2268 ...
2269
2270 ; This instruction is a normal store with memory ordering
2271 ; constraints. Thanks to this we do not have to put a data
 2272 ; barrier instruction to make sure all data reads and writes are done
 2273 ; before this instruction is executed. Furthermore, this instruction
2274 ; will trigger an event, letting other threads know they can grab
2275 ; the lock.
2276 STLR xzr, [x0]
2277
2278 */
2279 p += emit_mov_addr (p, x0, lockaddr);
2280 p += emit_stlr (p, xzr, x0);
2281
2282 /* Free collecting_t object:
2283
2284 ADD sp, sp, #16
2285
2286 */
2287 p += emit_add (p, sp, sp, immediate_operand (16));
2288
2289 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2290 registers from the stack.
2291
2292 LDR x2, [sp, #(2 * 16)]
2293 LDR x1, [sp, #(1 * 16)]
2294 LDR x0, [sp, #(0 * 16)]
2295
2296 MSR NZCV, x2
2297 MSR FPSR, x1
2298 MSR FPCR, x0
2299
 2300 ADD sp, sp, #(5 * 16)
2301
2302 */
2303 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2304 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2305 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2306 p += emit_msr (p, NZCV, x2);
2307 p += emit_msr (p, FPSR, x1);
2308 p += emit_msr (p, FPCR, x0);
2309
2310 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2311
2312 /* Pop general purpose registers:
2313
2314 LDR x0, [sp]
2315 ...
2316 LDR x30, [sp, #(30 * 16)]
2317
2318 ADD sp, sp, #(31 * 16)
2319
2320 */
2321 for (i = 0; i <= 30; i += 1)
2322 p += emit_ldr (p, aarch64_register (i, 1), sp,
2323 offset_memory_operand (i * 16));
2324 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2325
2326 /* Pop SIMD&FP registers:
2327
2328 LDP q0, q1, [sp]
2329 ...
2330 LDP q30, q31, [sp, #(30 * 16)]
2331
2332 ADD sp, sp, #(32 * 16)
2333
2334 */
2335 for (i = 0; i <= 30; i += 2)
2336 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2337 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2338
2339 /* Write the code into the inferior memory. */
2340 append_insns (&buildaddr, p - buf, buf);
2341
2342 /* Now emit the relocated instruction. */
2343 *adjusted_insn_addr = buildaddr;
 2344 target_read_uint32 (tpaddr, &insn);
2345
2346 insn_data.base.insn_addr = tpaddr;
2347 insn_data.new_addr = buildaddr;
2348 insn_data.insn_ptr = buf;
2349
2350 aarch64_relocate_instruction (insn, &visitor,
2351 (struct aarch64_insn_data *) &insn_data);
2352
 2353 /* We may not have been able to relocate the instruction. */
 2354 if (insn_data.insn_ptr == buf)
2355 {
2356 sprintf (err,
2357 "E.Could not relocate instruction from %s to %s.",
2358 core_addr_to_string_nz (tpaddr),
2359 core_addr_to_string_nz (buildaddr));
2360 return 1;
2361 }
 2362 else
 2363 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
 2364 *adjusted_insn_addr_end = buildaddr;
2365
2366 /* Go back to the start of the buffer. */
2367 p = buf;
2368
2369 /* Emit a branch back from the jump pad. */
2370 offset = (tpaddr + orig_size - buildaddr);
2371 if (!can_encode_int32 (offset, 28))
2372 {
2373 sprintf (err,
2374 "E.Jump back from jump pad too far from tracepoint "
 2375 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2376 offset);
2377 return 1;
2378 }
2379
2380 p += emit_b (p, 0, offset);
2381 append_insns (&buildaddr, p - buf, buf);
2382
2383 /* Give the caller a branch instruction into the jump pad. */
2384 offset = (*jump_entry - tpaddr);
2385 if (!can_encode_int32 (offset, 28))
2386 {
2387 sprintf (err,
2388 "E.Jump pad too far from tracepoint "
 2389 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2390 offset);
2391 return 1;
2392 }
2393
2394 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2395 *jjump_pad_insn_size = 4;
2396
2397 /* Return the end address of our pad. */
2398 *jump_entry = buildaddr;
2399
2400 return 0;
2401}
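/* A sketch of the +/-128 MiB constraint enforced above (helper name and
   sample addresses hypothetical): a B instruction encodes a 26-bit word
   offset, i.e. a 28-bit signed byte offset.  With a tracepoint at
   0x400000 and a jump pad entry at 0x3ff0000, the offset 0x3bf0000 is
   well within range.  */

static int
jump_pad_in_range_example (CORE_ADDR tpaddr, CORE_ADDR jump_entry)
{
  int64_t offset = jump_entry - tpaddr;

  return can_encode_int32 (offset, 28);
}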
2402
2403/* Helper function writing LEN instructions from START into
2404 current_insn_ptr. */
2405
2406static void
2407emit_ops_insns (const uint32_t *start, int len)
2408{
2409 CORE_ADDR buildaddr = current_insn_ptr;
2410
2411 if (debug_threads)
 2412 debug_printf ("Adding %d instructions at %s\n",
2413 len, paddress (buildaddr));
2414
2415 append_insns (&buildaddr, len, start);
2416 current_insn_ptr = buildaddr;
2417}
2418
2419/* Pop a register from the stack. */
2420
2421static int
2422emit_pop (uint32_t *buf, struct aarch64_register rt)
2423{
2424 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2425}
2426
2427/* Push a register on the stack. */
2428
2429static int
2430emit_push (uint32_t *buf, struct aarch64_register rt)
2431{
2432 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2433}
2434
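/* A minimal round-trip sketch (helper name hypothetical): each
   expression-stack cell is a full 16 bytes so SP keeps the 16-byte
   alignment AArch64 mandates.  */

static int
emit_push_pop_example (uint32_t *buf)
{
  uint32_t *p = buf;

  p += emit_push (p, x0);	/* STR x0, [sp, #-16]!  */
  p += emit_pop (p, x0);	/* LDR x0, [sp], #16  */
  return p - buf;
}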
2435/* Implementation of emit_ops method "emit_prologue". */
2436
2437static void
2438aarch64_emit_prologue (void)
2439{
2440 uint32_t buf[16];
2441 uint32_t *p = buf;
2442
 2443 /* This function emits a prologue for the following function prototype:
2444
2445 enum eval_result_type f (unsigned char *regs,
2446 ULONGEST *value);
2447
 2448 The first argument is a buffer of raw registers. The second
 2449 argument is a pointer to the result of evaluating the expression,
 2450 which will be set to whatever is on top of the stack at the end.
2452
2453 The stack set up by the prologue is as such:
2454
2455 High *------------------------------------------------------*
2456 | LR |
2457 | FP | <- FP
2458 | x1 (ULONGEST *value) |
2459 | x0 (unsigned char *regs) |
2460 Low *------------------------------------------------------*
2461
2462 As we are implementing a stack machine, each opcode can expand the
2463 stack so we never know how far we are from the data saved by this
 2464 prologue. In order to be able to refer to value and regs later, we save
2465 the current stack pointer in the frame pointer. This way, it is not
2466 clobbered when calling C functions.
2467
 2468 Finally, throughout every operation, we are using register x0 as the
2469 top of the stack, and x1 as a scratch register. */
2470
2471 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2472 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2473 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2474
2475 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2476
2477
2478 emit_ops_insns (buf, p - buf);
2479}
2480
2481/* Implementation of emit_ops method "emit_epilogue". */
2482
2483static void
2484aarch64_emit_epilogue (void)
2485{
2486 uint32_t buf[16];
2487 uint32_t *p = buf;
2488
2489 /* Store the result of the expression (x0) in *value. */
2490 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2491 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2492 p += emit_str (p, x0, x1, offset_memory_operand (0));
2493
2494 /* Restore the previous state. */
2495 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2496 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2497
2498 /* Return expr_eval_no_error. */
2499 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2500 p += emit_ret (p, lr);
2501
2502 emit_ops_insns (buf, p - buf);
2503}
2504
2505/* Implementation of emit_ops method "emit_add". */
2506
2507static void
2508aarch64_emit_add (void)
2509{
2510 uint32_t buf[16];
2511 uint32_t *p = buf;
2512
2513 p += emit_pop (p, x1);
 2514 p += emit_add (p, x0, x1, register_operand (x0));
2515
2516 emit_ops_insns (buf, p - buf);
2517}
2518
2519/* Implementation of emit_ops method "emit_sub". */
2520
2521static void
2522aarch64_emit_sub (void)
2523{
2524 uint32_t buf[16];
2525 uint32_t *p = buf;
2526
2527 p += emit_pop (p, x1);
 2528 p += emit_sub (p, x0, x1, register_operand (x0));
2529
2530 emit_ops_insns (buf, p - buf);
2531}
2532
2533/* Implementation of emit_ops method "emit_mul". */
2534
2535static void
2536aarch64_emit_mul (void)
2537{
2538 uint32_t buf[16];
2539 uint32_t *p = buf;
2540
2541 p += emit_pop (p, x1);
2542 p += emit_mul (p, x0, x1, x0);
2543
2544 emit_ops_insns (buf, p - buf);
2545}
2546
2547/* Implementation of emit_ops method "emit_lsh". */
2548
2549static void
2550aarch64_emit_lsh (void)
2551{
2552 uint32_t buf[16];
2553 uint32_t *p = buf;
2554
2555 p += emit_pop (p, x1);
2556 p += emit_lslv (p, x0, x1, x0);
2557
2558 emit_ops_insns (buf, p - buf);
2559}
2560
2561/* Implementation of emit_ops method "emit_rsh_signed". */
2562
2563static void
2564aarch64_emit_rsh_signed (void)
2565{
2566 uint32_t buf[16];
2567 uint32_t *p = buf;
2568
2569 p += emit_pop (p, x1);
2570 p += emit_asrv (p, x0, x1, x0);
2571
2572 emit_ops_insns (buf, p - buf);
2573}
2574
2575/* Implementation of emit_ops method "emit_rsh_unsigned". */
2576
2577static void
2578aarch64_emit_rsh_unsigned (void)
2579{
2580 uint32_t buf[16];
2581 uint32_t *p = buf;
2582
2583 p += emit_pop (p, x1);
2584 p += emit_lsrv (p, x0, x1, x0);
2585
2586 emit_ops_insns (buf, p - buf);
2587}
2588
2589/* Implementation of emit_ops method "emit_ext". */
2590
2591static void
2592aarch64_emit_ext (int arg)
2593{
2594 uint32_t buf[16];
2595 uint32_t *p = buf;
2596
2597 p += emit_sbfx (p, x0, x0, 0, arg);
2598
2599 emit_ops_insns (buf, p - buf);
2600}
2601
2602/* Implementation of emit_ops method "emit_log_not". */
2603
2604static void
2605aarch64_emit_log_not (void)
2606{
2607 uint32_t buf[16];
2608 uint32_t *p = buf;
2609
2610 /* If the top of the stack is 0, replace it with 1. Else replace it with
2611 0. */
2612
2613 p += emit_cmp (p, x0, immediate_operand (0));
2614 p += emit_cset (p, x0, EQ);
2615
2616 emit_ops_insns (buf, p - buf);
2617}
2618
2619/* Implementation of emit_ops method "emit_bit_and". */
2620
2621static void
2622aarch64_emit_bit_and (void)
2623{
2624 uint32_t buf[16];
2625 uint32_t *p = buf;
2626
2627 p += emit_pop (p, x1);
2628 p += emit_and (p, x0, x0, x1);
2629
2630 emit_ops_insns (buf, p - buf);
2631}
2632
2633/* Implementation of emit_ops method "emit_bit_or". */
2634
2635static void
2636aarch64_emit_bit_or (void)
2637{
2638 uint32_t buf[16];
2639 uint32_t *p = buf;
2640
2641 p += emit_pop (p, x1);
2642 p += emit_orr (p, x0, x0, x1);
2643
2644 emit_ops_insns (buf, p - buf);
2645}
2646
2647/* Implementation of emit_ops method "emit_bit_xor". */
2648
2649static void
2650aarch64_emit_bit_xor (void)
2651{
2652 uint32_t buf[16];
2653 uint32_t *p = buf;
2654
2655 p += emit_pop (p, x1);
2656 p += emit_eor (p, x0, x0, x1);
2657
2658 emit_ops_insns (buf, p - buf);
2659}
2660
2661/* Implementation of emit_ops method "emit_bit_not". */
2662
2663static void
2664aarch64_emit_bit_not (void)
2665{
2666 uint32_t buf[16];
2667 uint32_t *p = buf;
2668
2669 p += emit_mvn (p, x0, x0);
2670
2671 emit_ops_insns (buf, p - buf);
2672}
2673
2674/* Implementation of emit_ops method "emit_equal". */
2675
2676static void
2677aarch64_emit_equal (void)
2678{
2679 uint32_t buf[16];
2680 uint32_t *p = buf;
2681
2682 p += emit_pop (p, x1);
2683 p += emit_cmp (p, x0, register_operand (x1));
2684 p += emit_cset (p, x0, EQ);
2685
2686 emit_ops_insns (buf, p - buf);
2687}
2688
2689/* Implementation of emit_ops method "emit_less_signed". */
2690
2691static void
2692aarch64_emit_less_signed (void)
2693{
2694 uint32_t buf[16];
2695 uint32_t *p = buf;
2696
2697 p += emit_pop (p, x1);
2698 p += emit_cmp (p, x1, register_operand (x0));
2699 p += emit_cset (p, x0, LT);
2700
2701 emit_ops_insns (buf, p - buf);
2702}
2703
2704/* Implementation of emit_ops method "emit_less_unsigned". */
2705
2706static void
2707aarch64_emit_less_unsigned (void)
2708{
2709 uint32_t buf[16];
2710 uint32_t *p = buf;
2711
2712 p += emit_pop (p, x1);
2713 p += emit_cmp (p, x1, register_operand (x0));
2714 p += emit_cset (p, x0, LO);
2715
2716 emit_ops_insns (buf, p - buf);
2717}
2718
2719/* Implementation of emit_ops method "emit_ref". */
2720
2721static void
2722aarch64_emit_ref (int size)
2723{
2724 uint32_t buf[16];
2725 uint32_t *p = buf;
2726
2727 switch (size)
2728 {
2729 case 1:
2730 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2731 break;
2732 case 2:
2733 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2734 break;
2735 case 4:
2736 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2737 break;
2738 case 8:
2739 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2740 break;
2741 default:
2742 /* Unknown size, bail on compilation. */
2743 emit_error = 1;
2744 break;
2745 }
2746
2747 emit_ops_insns (buf, p - buf);
2748}
2749
2750/* Implementation of emit_ops method "emit_if_goto". */
2751
2752static void
2753aarch64_emit_if_goto (int *offset_p, int *size_p)
2754{
2755 uint32_t buf[16];
2756 uint32_t *p = buf;
2757
2758 /* The Z flag is set or cleared here. */
2759 p += emit_cmp (p, x0, immediate_operand (0));
2760 /* This instruction must not change the Z flag. */
2761 p += emit_pop (p, x0);
2762 /* Branch over the next instruction if x0 == 0. */
2763 p += emit_bcond (p, EQ, 8);
2764
2765 /* The NOP instruction will be patched with an unconditional branch. */
2766 if (offset_p)
2767 *offset_p = (p - buf) * 4;
2768 if (size_p)
2769 *size_p = 4;
2770 p += emit_nop (p);
2771
2772 emit_ops_insns (buf, p - buf);
2773}
2774
2775/* Implementation of emit_ops method "emit_goto". */
2776
2777static void
2778aarch64_emit_goto (int *offset_p, int *size_p)
2779{
2780 uint32_t buf[16];
2781 uint32_t *p = buf;
2782
2783 /* The NOP instruction will be patched with an unconditional branch. */
2784 if (offset_p)
2785 *offset_p = 0;
2786 if (size_p)
2787 *size_p = 4;
2788 p += emit_nop (p);
2789
2790 emit_ops_insns (buf, p - buf);
2791}
2792
2793/* Implementation of emit_ops method "write_goto_address". */
2794
2795static void
2796aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2797{
2798 uint32_t insn;
2799
2800 emit_b (&insn, 0, to - from);
2801 append_insns (&from, 1, &insn);
2802}
2803
2804/* Implementation of emit_ops method "emit_const". */
2805
2806static void
2807aarch64_emit_const (LONGEST num)
2808{
2809 uint32_t buf[16];
2810 uint32_t *p = buf;
2811
2812 p += emit_mov_addr (p, x0, num);
2813
2814 emit_ops_insns (buf, p - buf);
2815}
2816
2817/* Implementation of emit_ops method "emit_call". */
2818
2819static void
2820aarch64_emit_call (CORE_ADDR fn)
2821{
2822 uint32_t buf[16];
2823 uint32_t *p = buf;
2824
2825 p += emit_mov_addr (p, ip0, fn);
2826 p += emit_blr (p, ip0);
2827
2828 emit_ops_insns (buf, p - buf);
2829}
2830
2831/* Implementation of emit_ops method "emit_reg". */
2832
2833static void
2834aarch64_emit_reg (int reg)
2835{
2836 uint32_t buf[16];
2837 uint32_t *p = buf;
2838
2839 /* Set x0 to unsigned char *regs. */
2840 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2841 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2842 p += emit_mov (p, x1, immediate_operand (reg));
2843
2844 emit_ops_insns (buf, p - buf);
2845
2846 aarch64_emit_call (get_raw_reg_func_addr ());
2847}
2848
2849/* Implementation of emit_ops method "emit_pop". */
2850
2851static void
2852aarch64_emit_pop (void)
2853{
2854 uint32_t buf[16];
2855 uint32_t *p = buf;
2856
2857 p += emit_pop (p, x0);
2858
2859 emit_ops_insns (buf, p - buf);
2860}
2861
2862/* Implementation of emit_ops method "emit_stack_flush". */
2863
2864static void
2865aarch64_emit_stack_flush (void)
2866{
2867 uint32_t buf[16];
2868 uint32_t *p = buf;
2869
2870 p += emit_push (p, x0);
2871
2872 emit_ops_insns (buf, p - buf);
2873}
2874
2875/* Implementation of emit_ops method "emit_zero_ext". */
2876
2877static void
2878aarch64_emit_zero_ext (int arg)
2879{
2880 uint32_t buf[16];
2881 uint32_t *p = buf;
2882
2883 p += emit_ubfx (p, x0, x0, 0, arg);
2884
2885 emit_ops_insns (buf, p - buf);
2886}
2887
2888/* Implementation of emit_ops method "emit_swap". */
2889
2890static void
2891aarch64_emit_swap (void)
2892{
2893 uint32_t buf[16];
2894 uint32_t *p = buf;
2895
2896 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2897 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2898 p += emit_mov (p, x0, register_operand (x1));
2899
2900 emit_ops_insns (buf, p - buf);
2901}
2902
2903/* Implementation of emit_ops method "emit_stack_adjust". */
2904
2905static void
2906aarch64_emit_stack_adjust (int n)
2907{
2908 /* This is not needed with our design. */
2909 uint32_t buf[16];
2910 uint32_t *p = buf;
2911
2912 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2913
2914 emit_ops_insns (buf, p - buf);
2915}
2916
2917/* Implementation of emit_ops method "emit_int_call_1". */
2918
2919static void
2920aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2921{
2922 uint32_t buf[16];
2923 uint32_t *p = buf;
2924
2925 p += emit_mov (p, x0, immediate_operand (arg1));
2926
2927 emit_ops_insns (buf, p - buf);
2928
2929 aarch64_emit_call (fn);
2930}
2931
2932/* Implementation of emit_ops method "emit_void_call_2". */
2933
2934static void
2935aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2936{
2937 uint32_t buf[16];
2938 uint32_t *p = buf;
2939
2940 /* Push x0 on the stack. */
2941 aarch64_emit_stack_flush ();
2942
2943 /* Setup arguments for the function call:
2944
2945 x0: arg1
2946 x1: top of the stack
2947
2948 MOV x1, x0
2949 MOV x0, #arg1 */
2950
2951 p += emit_mov (p, x1, register_operand (x0));
2952 p += emit_mov (p, x0, immediate_operand (arg1));
2953
2954 emit_ops_insns (buf, p - buf);
2955
2956 aarch64_emit_call (fn);
2957
2958 /* Restore x0. */
2959 aarch64_emit_pop ();
2960}
2961
2962/* Implementation of emit_ops method "emit_eq_goto". */
2963
2964static void
2965aarch64_emit_eq_goto (int *offset_p, int *size_p)
2966{
2967 uint32_t buf[16];
2968 uint32_t *p = buf;
2969
2970 p += emit_pop (p, x1);
2971 p += emit_cmp (p, x1, register_operand (x0));
2972 /* Branch over the next instruction if x0 != x1. */
2973 p += emit_bcond (p, NE, 8);
2974 /* The NOP instruction will be patched with an unconditional branch. */
2975 if (offset_p)
2976 *offset_p = (p - buf) * 4;
2977 if (size_p)
2978 *size_p = 4;
2979 p += emit_nop (p);
2980
2981 emit_ops_insns (buf, p - buf);
2982}
2983
2984/* Implementation of emit_ops method "emit_ne_goto". */
2985
2986static void
2987aarch64_emit_ne_goto (int *offset_p, int *size_p)
2988{
2989 uint32_t buf[16];
2990 uint32_t *p = buf;
2991
2992 p += emit_pop (p, x1);
2993 p += emit_cmp (p, x1, register_operand (x0));
2994 /* Branch over the next instruction if x0 == x1. */
2995 p += emit_bcond (p, EQ, 8);
2996 /* The NOP instruction will be patched with an unconditional branch. */
2997 if (offset_p)
2998 *offset_p = (p - buf) * 4;
2999 if (size_p)
3000 *size_p = 4;
3001 p += emit_nop (p);
3002
3003 emit_ops_insns (buf, p - buf);
3004}
3005
3006/* Implementation of emit_ops method "emit_lt_goto". */
3007
3008static void
3009aarch64_emit_lt_goto (int *offset_p, int *size_p)
3010{
3011 uint32_t buf[16];
3012 uint32_t *p = buf;
3013
3014 p += emit_pop (p, x1);
3015 p += emit_cmp (p, x1, register_operand (x0));
3016 /* Branch over the next instruction if x0 >= x1. */
3017 p += emit_bcond (p, GE, 8);
3018 /* The NOP instruction will be patched with an unconditional branch. */
3019 if (offset_p)
3020 *offset_p = (p - buf) * 4;
3021 if (size_p)
3022 *size_p = 4;
3023 p += emit_nop (p);
3024
3025 emit_ops_insns (buf, p - buf);
3026}
3027
3028/* Implementation of emit_ops method "emit_le_goto". */
3029
3030static void
3031aarch64_emit_le_goto (int *offset_p, int *size_p)
3032{
3033 uint32_t buf[16];
3034 uint32_t *p = buf;
3035
3036 p += emit_pop (p, x1);
3037 p += emit_cmp (p, x1, register_operand (x0));
 3039 /* Branch over the next instruction if x1 > x0. */
3039 p += emit_bcond (p, GT, 8);
3040 /* The NOP instruction will be patched with an unconditional branch. */
3041 if (offset_p)
3042 *offset_p = (p - buf) * 4;
3043 if (size_p)
3044 *size_p = 4;
3045 p += emit_nop (p);
3046
3047 emit_ops_insns (buf, p - buf);
3048}
3049
3050/* Implementation of emit_ops method "emit_gt_goto". */
3051
3052static void
3053aarch64_emit_gt_goto (int *offset_p, int *size_p)
3054{
3055 uint32_t buf[16];
3056 uint32_t *p = buf;
3057
3058 p += emit_pop (p, x1);
3059 p += emit_cmp (p, x1, register_operand (x0));
 3061 /* Branch over the next instruction if x1 <= x0. */
3061 p += emit_bcond (p, LE, 8);
3062 /* The NOP instruction will be patched with an unconditional branch. */
3063 if (offset_p)
3064 *offset_p = (p - buf) * 4;
3065 if (size_p)
3066 *size_p = 4;
3067 p += emit_nop (p);
3068
3069 emit_ops_insns (buf, p - buf);
3070}
3071
3072/* Implementation of emit_ops method "emit_ge_goto". */
3073
3074static void
3075aarch64_emit_ge_goto (int *offset_p, int *size_p)
3076{
3077 uint32_t buf[16];
3078 uint32_t *p = buf;
3079
3080 p += emit_pop (p, x1);
3081 p += emit_cmp (p, x1, register_operand (x0));
3082 /* Branch over the next instruction if x0 <= x1. */
3083 p += emit_bcond (p, LT, 8);
3084 /* The NOP instruction will be patched with an unconditional branch. */
3085 if (offset_p)
3086 *offset_p = (p - buf) * 4;
3087 if (size_p)
3088 *size_p = 4;
3089 p += emit_nop (p);
3090
3091 emit_ops_insns (buf, p - buf);
3092}
3093
3094static struct emit_ops aarch64_emit_ops_impl =
3095{
3096 aarch64_emit_prologue,
3097 aarch64_emit_epilogue,
3098 aarch64_emit_add,
3099 aarch64_emit_sub,
3100 aarch64_emit_mul,
3101 aarch64_emit_lsh,
3102 aarch64_emit_rsh_signed,
3103 aarch64_emit_rsh_unsigned,
3104 aarch64_emit_ext,
3105 aarch64_emit_log_not,
3106 aarch64_emit_bit_and,
3107 aarch64_emit_bit_or,
3108 aarch64_emit_bit_xor,
3109 aarch64_emit_bit_not,
3110 aarch64_emit_equal,
3111 aarch64_emit_less_signed,
3112 aarch64_emit_less_unsigned,
3113 aarch64_emit_ref,
3114 aarch64_emit_if_goto,
3115 aarch64_emit_goto,
3116 aarch64_write_goto_address,
3117 aarch64_emit_const,
3118 aarch64_emit_call,
3119 aarch64_emit_reg,
3120 aarch64_emit_pop,
3121 aarch64_emit_stack_flush,
3122 aarch64_emit_zero_ext,
3123 aarch64_emit_swap,
3124 aarch64_emit_stack_adjust,
3125 aarch64_emit_int_call_1,
3126 aarch64_emit_void_call_2,
3127 aarch64_emit_eq_goto,
3128 aarch64_emit_ne_goto,
3129 aarch64_emit_lt_goto,
3130 aarch64_emit_le_goto,
3131 aarch64_emit_gt_goto,
 3132 aarch64_emit_ge_goto,
3133};
3134
3135/* Implementation of target ops method "emit_ops". */
 3136
3137emit_ops *
3138aarch64_target::emit_ops ()
3139{
3140 return &aarch64_emit_ops_impl;
3141}
3142
3143/* Implementation of target ops method
3144 "get_min_fast_tracepoint_insn_len". */
3145
3146int
3147aarch64_target::get_min_fast_tracepoint_insn_len ()
3148{
3149 return 4;
3150}
3151
3152/* Implementation of linux target ops method "low_supports_range_stepping". */
 3153
3154bool
3155aarch64_target::low_supports_range_stepping ()
3156{
 3157 return true;
3158}
3159
3160/* Implementation of target ops method "sw_breakpoint_from_kind". */
 3161
3162const gdb_byte *
3163aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3164{
3165 if (is_64bit_tdesc ())
3166 {
3167 *size = aarch64_breakpoint_len;
3168 return aarch64_breakpoint;
3169 }
3170 else
3171 return arm_sw_breakpoint_from_kind (kind, size);
3172}
3173
3174/* Implementation of target ops method "breakpoint_kind_from_pc". */
 3175
3176int
3177aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3178{
3179 if (is_64bit_tdesc ())
3180 return aarch64_breakpoint_len;
3181 else
3182 return arm_breakpoint_kind_from_pc (pcptr);
3183}
3184
3185/* Implementation of the target ops method
3186 "breakpoint_kind_from_current_state". */
3187
3188int
3189aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3190{
3191 if (is_64bit_tdesc ())
3192 return aarch64_breakpoint_len;
3193 else
3194 return arm_breakpoint_kind_from_current_state (pcptr);
3195}
3196
3197/* The linux target ops object. */
3198
3199linux_process_target *the_linux_target = &the_aarch64_target;
3200
3201void
3202initialize_low_arch (void)
3203{
3204 initialize_low_arch_aarch32 ();
3205
 3206 initialize_regsets_info (&aarch64_regsets_info);
 3207 initialize_regsets_info (&aarch64_sve_regsets_info);
3208}