gdbserver/linux-low: turn 'breakpoint_at' into a method
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description contains the SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

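/* A note on the regset accessors below, following the usual gdbserver
   convention: the "fill" routines copy register values out of the
   regcache into a buffer about to be handed to PTRACE_SETREGSET, while
   the "store" routines copy a buffer obtained with PTRACE_GETREGSET
   back into the regcache.  */
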
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
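/* That is BRK #0: the A64 encoding of BRK is 0xd4200000 | (imm16 << 5),
   and, like all A64 instructions, it is stored little-endian.  */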

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

176eb98c
MS
405
406static CORE_ADDR
407aarch64_stopped_data_address (void)
408{
409 siginfo_t siginfo;
410 int pid, i;
411 struct aarch64_debug_reg_state *state;
412
0bfdf32f 413 pid = lwpid_of (current_thread);
176eb98c
MS
414
415 /* Get the siginfo. */
416 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
417 return (CORE_ADDR) 0;
418
419 /* Need to be a hardware breakpoint/watchpoint trap. */
420 if (siginfo.si_signo != SIGTRAP
421 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
422 return (CORE_ADDR) 0;
423
424 /* Check if the address matches any watched address. */
88e2cf7e 425 state = aarch64_get_debug_reg_state (pid_of (current_thread));
176eb98c
MS
426 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
427 {
a3b60e45
JK
428 const unsigned int offset
429 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
176eb98c
MS
430 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
431 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
a3b60e45
JK
432 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
433 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
434 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
435
176eb98c
MS
436 if (state->dr_ref_count_wp[i]
437 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
a3b60e45 438 && addr_trap >= addr_watch_aligned
176eb98c 439 && addr_trap < addr_watch + len)
a3b60e45
JK
440 {
441 /* ADDR_TRAP reports the first address of the memory range
442 accessed by the CPU, regardless of what was the memory
443 range watched. Thus, a large CPU access that straddles
444 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
445 ADDR_TRAP that is lower than the
446 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
447
448 addr: | 4 | 5 | 6 | 7 | 8 |
449 |---- range watched ----|
450 |----------- range accessed ------------|
451
452 In this case, ADDR_TRAP will be 4.
453
454 To match a watchpoint known to GDB core, we must never
455 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
456 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
457 positive on kernels older than 4.10. See PR
458 external/20207. */
459 return addr_orig;
460 }
176eb98c
MS
461 }
462
463 return (CORE_ADDR) 0;
464}
465
421530db 466/* Implementation of linux_target_ops method "stopped_by_watchpoint". */
176eb98c
MS
467
468static int
469aarch64_stopped_by_watchpoint (void)
470{
471 if (aarch64_stopped_data_address () != 0)
472 return 1;
473 else
474 return 0;
475}
476
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

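/* In the conversion below, DIRECTION == 0 means the native siginfo is
   being converted into the 32-bit inferior's compat layout, and
   non-zero means the reverse; this follows the generic siginfo_fixup
   convention in linux-low.  */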
/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

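/* The NT_ARM_TLS regset read below exposes the TPIDR_EL0 register,
   which AArch64 libc implementations use as the thread pointer.  */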
/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

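/* These values are the architectural NZCV condition encodings used in
   B.cond and CSINC below; LO (unsigned lower) shares its encoding with
   CC, which is why it is 0x3 rather than following EQ/NE directly.  */
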
enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

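/* For instance, TPIDR_EL0 is S3_3_C13_C0_2; MRS/MSR encode the system
   register as a 15-bit o0:op1:CRn:CRm:op2 field where o0 is op0 - 2,
   hence (1 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 above.  */
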
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

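/* Throughout these emitters, ENCODE (val, size, offset), from
   arch/aarch64-insn.h, masks VAL to SIZE bits and shifts it to bit
   OFFSET; e.g. ENCODE (rn.num, 5, 5) above places the register number
   in bits 9:5 of the instruction word.  */
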
/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

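/* Helper function emitting a load or store pair instruction.  OPCODE
   selects LDP or STP, RT and RT2 are the pair of registers, RN is the
   base address register and OPERAND chooses the addressing mode
   (offset, pre-index or post-index).  Returns the number of 32-bit
   instructions written (1), or 0 for an unsupported addressing mode.  */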
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}


/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

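/* Roughly speaking, the exclusive-access emitters above, together with
   emit_sevl and emit_wfe below, are what the fast tracepoint jump pad
   code later in this file uses to build its spin lock: a SEVL/WFE wait
   loop around LDAXR/CMP/B.NE/STXR to take the lock, and a STLR to
   release it.  */
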
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

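/* As an illustration of the early exits above: for ADDR 0x12345678
   this emits just two instructions, MOV xd, #0x5678 followed by
   MOVK xd, #0x1234, lsl #16.  */
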
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination
   register: 1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

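/* For example, emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE
   (EQ is 0x0, so toggling the low bit yields NE, 0x1): x0 becomes 1
   when the Z flag is set and 0 otherwise.  */
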
bb903df0
PL
1665/* Write LEN instructions from BUF into the inferior memory at *TO.
1666
1667 Note instructions are always little endian on AArch64, unlike data. */
1668
1669static void
1670append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1671{
1672 size_t byte_len = len * sizeof (uint32_t);
1673#if (__BYTE_ORDER == __BIG_ENDIAN)
cb93dc7f 1674 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
bb903df0
PL
1675 size_t i;
1676
1677 for (i = 0; i < len; i++)
1678 le_buf[i] = htole32 (buf[i]);
1679
4196ab2a 1680 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
bb903df0
PL
1681
1682 xfree (le_buf);
1683#else
4196ab2a 1684 target_write_memory (*to, (const unsigned char *) buf, byte_len);
bb903df0
PL
1685#endif
1686
1687 *to += byte_len;
1688}
1689
0badd99f
YQ
1690/* Sub-class of struct aarch64_insn_data, store information of
1691 instruction relocation for fast tracepoint. Visitor can
1692 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1693 the relocated instructions in buffer pointed by INSN_PTR. */
bb903df0 1694
0badd99f
YQ
1695struct aarch64_insn_relocation_data
1696{
1697 struct aarch64_insn_data base;
1698
1699 /* The new address the instruction is relocated to. */
1700 CORE_ADDR new_addr;
1701 /* Pointer to the buffer of relocated instruction(s). */
1702 uint32_t *insn_ptr;
1703};
1704
1705/* Implementation of aarch64_insn_visitor method "b". */
1706
1707static void
1708aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1709 struct aarch64_insn_data *data)
1710{
1711 struct aarch64_insn_relocation_data *insn_reloc
1712 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1713 int64_t new_offset
0badd99f
YQ
1714 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1715
1716 if (can_encode_int32 (new_offset, 28))
1717 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1718}
1719
1720/* Implementation of aarch64_insn_visitor method "b_cond". */
1721
1722static void
1723aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1724 struct aarch64_insn_data *data)
1725{
1726 struct aarch64_insn_relocation_data *insn_reloc
1727 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1728 int64_t new_offset
0badd99f
YQ
1729 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1730
1731 if (can_encode_int32 (new_offset, 21))
1732 {
1733 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1734 new_offset);
bb903df0 1735 }
0badd99f 1736 else if (can_encode_int32 (new_offset, 28))
bb903df0 1737 {
0badd99f
YQ
1738 /* The offset is out of range for a conditional branch
1739 instruction but not for a unconditional branch. We can use
1740 the following instructions instead:
bb903df0 1741
0badd99f
YQ
1742 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1743 B NOT_TAKEN ; Else jump over TAKEN and continue.
1744 TAKEN:
1745 B #(offset - 8)
1746 NOT_TAKEN:
1747
1748 */
1749
1750 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1751 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1752 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1753 }
0badd99f 1754}
bb903df0 1755
0badd99f
YQ
1756/* Implementation of aarch64_insn_visitor method "cb". */
1757
1758static void
1759aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1760 const unsigned rn, int is64,
1761 struct aarch64_insn_data *data)
1762{
1763 struct aarch64_insn_relocation_data *insn_reloc
1764 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1765 int64_t new_offset
0badd99f
YQ
1766 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1767
1768 if (can_encode_int32 (new_offset, 21))
1769 {
1770 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1771 aarch64_register (rn, is64), new_offset);
bb903df0 1772 }
0badd99f 1773 else if (can_encode_int32 (new_offset, 28))
bb903df0 1774 {
0badd99f
YQ
1775 /* The offset is out of range for a compare and branch
1776 instruction but not for a unconditional branch. We can use
1777 the following instructions instead:
1778
1779 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1780 B NOT_TAKEN ; Else jump over TAKEN and continue.
1781 TAKEN:
1782 B #(offset - 8)
1783 NOT_TAKEN:
1784
1785 */
1786 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1787 aarch64_register (rn, is64), 8);
1788 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1789 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1790 }
1791}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}
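
/* Illustrative example (hypothetical addresses): for an ADRP x0 at
   0x400123 whose decoded offset is 0x3000, ADDRESS is 0x403123 and
   the emitted MOV/MOVK sequence materializes 0x403123 & ~0xfff
   = 0x403000 -- the 4 KiB page base the original ADRP would have
   produced -- independent of where the relocated copy runs.  */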

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
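
/* Usage sketch (it mirrors the relocation step done in
   aarch64_install_fast_tracepoint_jump_pad below): to relocate one
   instruction INSN originally at OLD_ADDR so that it can run from a
   scratch buffer at NEW_ADDR:

     struct aarch64_insn_relocation_data insn_data;

     insn_data.base.insn_addr = old_addr;
     insn_data.new_addr = new_addr;
     insn_data.insn_ptr = buf;
     aarch64_relocate_instruction (insn, &visitor,
                                   (struct aarch64_insn_data *) &insn_data);

   If insn_ptr is unchanged afterwards, the instruction could not be
   relocated.  */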

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

   */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

   */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

   */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

   */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

   */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

   */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, x2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, w2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

   */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

   */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

   */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

   */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

   */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
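
/* For reference, the LDAXR/STXR spin-lock emitted above behaves like
   the following C11 sketch (illustrative only -- the jump pad emits
   raw instructions, and SP_VALUE stands for this thread's collecting_t
   address):

     #include <stdatomic.h>
     #include <stdint.h>

     extern _Atomic (uintptr_t) *lock;   // shared word at lockaddr
     uintptr_t self = (uintptr_t) sp_value;

     uintptr_t expected = 0;
     // Acquire: loop until we swap 0 -> &collecting_t.
     while (!atomic_compare_exchange_weak_explicit (lock, &expected, self,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
       expected = 0;

     // ... call the collector ...

     // Release: a plain store with release ordering, like STLR.
     atomic_store_explicit (lock, 0, memory_order_release);
 */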

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
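
/* A hypothetical caller of the compiled expression (names invented
   for illustration): the bytecode compiler lays the function out with
   the prototype shown above, so the in-process agent would invoke it
   roughly as

     ULONGEST value = 0;
     enum eval_result_type err
       = ((enum eval_result_type (*) (unsigned char *, ULONGEST *))
          compiled_code_addr) (regs_buffer, &value);

   with the expression result left in VALUE when ERR is
   expr_eval_no_error, as the epilogue below arranges.  */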

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
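
/* Worked example: with 0 on top of the stack, CMP sets the Z flag and
   CSET EQ writes 1; any non-zero top clears Z and CSET EQ writes 0 --
   exactly the semantics of C's logical not.  */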

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
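
/* Usage sketch: the bytecode compiler first calls aarch64_emit_if_goto
   or aarch64_emit_goto, which emit a NOP placeholder and report its
   position via *OFFSET_P and *SIZE_P.  Once the goto target is known,
   it calls aarch64_write_goto_address (from, to, size) to overwrite
   that NOP with a real B instruction, as implemented above.  */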

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};
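
/* Note that the initializer above is positional, so the function
   pointers must appear in exactly the order of the fields of struct
   emit_ops (declared elsewhere in gdbserver); reordering entries here
   would silently wire bytecode operations to the wrong emitters.  */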

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}