1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 protected:
59
60 void low_arch_setup () override;
61 };
62
63 /* The singleton target ops object. */
64
65 static aarch64_target the_aarch64_target;
66
67 /* Per-process arch-specific data we want to keep. */
68
69 struct arch_process_info
70 {
71 /* Hardware breakpoint/watchpoint data.
72 The reason for them to be per-process rather than per-thread is
73 due to the lack of information in the gdbserver environment;
74 gdbserver is not told whether a requested hardware
75 breakpoint/watchpoint is thread specific or not, so it has to set
76 each hw bp/wp for every thread in the current process. The
77 higher level bp/wp management in gdb will resume a thread if a hw
78 bp/wp trap is not expected for it. Since the hw bp/wp setting is
79 the same for each thread, it is reasonable for the data to live here.
80 */
81 struct aarch64_debug_reg_state debug_reg_state;
82 };
83
84 /* Return true if the size of register 0 is 8 bytes. */
85
86 static int
87 is_64bit_tdesc (void)
88 {
89 struct regcache *regcache = get_thread_regcache (current_thread, 0);
90
91 return register_size (regcache->tdesc, 0) == 8;
92 }
93
94 /* Return true if the regcache's target description contains the SVE registers. */
95
96 static bool
97 is_sve_tdesc (void)
98 {
99 struct regcache *regcache = get_thread_regcache (current_thread, 0);
100
101 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
102 }
103
104 static void
105 aarch64_fill_gregset (struct regcache *regcache, void *buf)
106 {
107 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
108 int i;
109
110 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
111 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
112 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
113 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
114 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
115 }
116
117 static void
118 aarch64_store_gregset (struct regcache *regcache, const void *buf)
119 {
120 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
121 int i;
122
123 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
124 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
125 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
126 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
127 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
128 }
129
130 static void
131 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
132 {
133 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
134 int i;
135
136 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
137 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
138 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
139 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
140 }
141
142 static void
143 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
144 {
145 const struct user_fpsimd_state *regset
146 = (const struct user_fpsimd_state *) buf;
147 int i;
148
149 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
150 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
151 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
152 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
153 }
154
155 /* Store the pauth registers to regcache. */
156
157 static void
158 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
159 {
160 uint64_t *pauth_regset = (uint64_t *) buf;
161 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
162
163 if (pauth_base == 0)
164 return;
165
166 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
167 &pauth_regset[0]);
168 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
169 &pauth_regset[1]);
170 }
171
172 /* Implementation of linux_target_ops method "get_pc". */
173
174 static CORE_ADDR
175 aarch64_get_pc (struct regcache *regcache)
176 {
177 if (register_size (regcache->tdesc, 0) == 8)
178 return linux_get_pc_64bit (regcache);
179 else
180 return linux_get_pc_32bit (regcache);
181 }
182
183 /* Implementation of linux_target_ops method "set_pc". */
184
185 static void
186 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
187 {
188 if (register_size (regcache->tdesc, 0) == 8)
189 linux_set_pc_64bit (regcache, pc);
190 else
191 linux_set_pc_32bit (regcache, pc);
192 }
193
194 #define aarch64_breakpoint_len 4
195
196 /* AArch64 BRK software debug mode instruction.
197 This instruction needs to match gdb/aarch64-tdep.c
198 (aarch64_default_breakpoint). */
199 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
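/* These bytes are the little-endian encoding of 0xd4200000, i.e. a BRK #0
   instruction, matching the default breakpoint in gdb/aarch64-tdep.c. */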
200
201 /* Implementation of linux_target_ops method "breakpoint_at". */
202
203 static int
204 aarch64_breakpoint_at (CORE_ADDR where)
205 {
206 if (is_64bit_tdesc ())
207 {
208 gdb_byte insn[aarch64_breakpoint_len];
209
210 the_target->read_memory (where, (unsigned char *) &insn,
211 aarch64_breakpoint_len);
212 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
213 return 1;
214
215 return 0;
216 }
217 else
218 return arm_breakpoint_at (where);
219 }
220
221 static void
222 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
223 {
224 int i;
225
226 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
227 {
228 state->dr_addr_bp[i] = 0;
229 state->dr_ctrl_bp[i] = 0;
230 state->dr_ref_count_bp[i] = 0;
231 }
232
233 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
234 {
235 state->dr_addr_wp[i] = 0;
236 state->dr_ctrl_wp[i] = 0;
237 state->dr_ref_count_wp[i] = 0;
238 }
239 }
240
241 /* Return the pointer to the debug register state structure in the
242 current process' arch-specific data area. */
243
244 struct aarch64_debug_reg_state *
245 aarch64_get_debug_reg_state (pid_t pid)
246 {
247 struct process_info *proc = find_process_pid (pid);
248
249 return &proc->priv->arch_private->debug_reg_state;
250 }
251
252 /* Implementation of linux_target_ops method "supports_z_point_type". */
253
254 static int
255 aarch64_supports_z_point_type (char z_type)
256 {
257 switch (z_type)
258 {
259 case Z_PACKET_SW_BP:
260 case Z_PACKET_HW_BP:
261 case Z_PACKET_WRITE_WP:
262 case Z_PACKET_READ_WP:
263 case Z_PACKET_ACCESS_WP:
264 return 1;
265 default:
266 return 0;
267 }
268 }
269
270 /* Implementation of linux_target_ops method "insert_point".
271
272 It actually only records the info of the to-be-inserted bp/wp;
273 the actual insertion will happen when threads are resumed. */
274
275 static int
276 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
277 int len, struct raw_breakpoint *bp)
278 {
279 int ret;
280 enum target_hw_bp_type targ_type;
281 struct aarch64_debug_reg_state *state
282 = aarch64_get_debug_reg_state (pid_of (current_thread));
283
284 if (show_debug_regs)
285 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
286 (unsigned long) addr, len);
287
288 /* Determine the type from the raw breakpoint type. */
289 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
290
291 if (targ_type != hw_execute)
292 {
293 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
294 ret = aarch64_handle_watchpoint (targ_type, addr, len,
295 1 /* is_insert */, state);
296 else
297 ret = -1;
298 }
299 else
300 {
301 if (len == 3)
302 {
303 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
304 instruction. Set it to 2 to correctly encode the length bit
305 mask in the hardware breakpoint/watchpoint control register. */
306 len = 2;
307 }
308 ret = aarch64_handle_breakpoint (targ_type, addr, len,
309 1 /* is_insert */, state);
310 }
311
312 if (show_debug_regs)
313 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
314 targ_type);
315
316 return ret;
317 }
318
319 /* Implementation of linux_target_ops method "remove_point".
320
321 It actually only records the info of the to-be-removed bp/wp,
322 the actual removal will be done when threads are resumed. */
323
324 static int
325 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
326 int len, struct raw_breakpoint *bp)
327 {
328 int ret;
329 enum target_hw_bp_type targ_type;
330 struct aarch64_debug_reg_state *state
331 = aarch64_get_debug_reg_state (pid_of (current_thread));
332
333 if (show_debug_regs)
334 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
335 (unsigned long) addr, len);
336
337 /* Determine the type from the raw breakpoint type. */
338 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
339
340 /* Set up state pointers. */
341 if (targ_type != hw_execute)
342 ret =
343 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
344 state);
345 else
346 {
347 if (len == 3)
348 {
349 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
350 instruction. Set it to 2 to correctly encode the length bit
351 mask in the hardware breakpoint/watchpoint control register. */
352 len = 2;
353 }
354 ret = aarch64_handle_breakpoint (targ_type, addr, len,
355 0 /* is_insert */, state);
356 }
357
358 if (show_debug_regs)
359 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
360 targ_type);
361
362 return ret;
363 }
364
365 /* Implementation of linux_target_ops method "stopped_data_address". */
366
367 static CORE_ADDR
368 aarch64_stopped_data_address (void)
369 {
370 siginfo_t siginfo;
371 int pid, i;
372 struct aarch64_debug_reg_state *state;
373
374 pid = lwpid_of (current_thread);
375
376 /* Get the siginfo. */
377 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
378 return (CORE_ADDR) 0;
379
380 /* Need to be a hardware breakpoint/watchpoint trap. */
381 if (siginfo.si_signo != SIGTRAP
382 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
383 return (CORE_ADDR) 0;
384
385 /* Check if the address matches any watched address. */
386 state = aarch64_get_debug_reg_state (pid_of (current_thread));
387 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
388 {
389 const unsigned int offset
390 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
391 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
392 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
393 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
394 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
395 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
396
397 if (state->dr_ref_count_wp[i]
398 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
399 && addr_trap >= addr_watch_aligned
400 && addr_trap < addr_watch + len)
401 {
402 /* ADDR_TRAP reports the first address of the memory range
403 accessed by the CPU, regardless of what was the memory
404 range watched. Thus, a large CPU access that straddles
405 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
406 ADDR_TRAP that is lower than the
407 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
408
409 addr: | 4 | 5 | 6 | 7 | 8 |
410 |---- range watched ----|
411 |----------- range accessed ------------|
412
413 In this case, ADDR_TRAP will be 4.
414
415 To match a watchpoint known to GDB core, we must never
416 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
417 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
418 positive on kernels older than 4.10. See PR
419 external/20207. */
420 return addr_orig;
421 }
422 }
423
424 return (CORE_ADDR) 0;
425 }
426
427 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
428
429 static int
430 aarch64_stopped_by_watchpoint (void)
431 {
432 if (aarch64_stopped_data_address () != 0)
433 return 1;
434 else
435 return 0;
436 }
437
438 /* Fetch the thread-local storage pointer for libthread_db. */
439
440 ps_err_e
441 ps_get_thread_area (struct ps_prochandle *ph,
442 lwpid_t lwpid, int idx, void **base)
443 {
444 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
445 is_64bit_tdesc ());
446 }
447
448 /* Implementation of linux_target_ops method "siginfo_fixup". */
449
450 static int
451 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
452 {
453 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
454 if (!is_64bit_tdesc ())
455 {
456 if (direction == 0)
457 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
458 native);
459 else
460 aarch64_siginfo_from_compat_siginfo (native,
461 (struct compat_siginfo *) inf);
462
463 return 1;
464 }
465
466 return 0;
467 }
468
469 /* Implementation of linux_target_ops method "new_process". */
470
471 static struct arch_process_info *
472 aarch64_linux_new_process (void)
473 {
474 struct arch_process_info *info = XCNEW (struct arch_process_info);
475
476 aarch64_init_debug_reg_state (&info->debug_reg_state);
477
478 return info;
479 }
480
481 /* Implementation of linux_target_ops method "delete_process". */
482
483 static void
484 aarch64_linux_delete_process (struct arch_process_info *info)
485 {
486 xfree (info);
487 }
488
489 /* Implementation of linux_target_ops method "linux_new_fork". */
490
491 static void
492 aarch64_linux_new_fork (struct process_info *parent,
493 struct process_info *child)
494 {
495 /* These are allocated by linux_add_process. */
496 gdb_assert (parent->priv != NULL
497 && parent->priv->arch_private != NULL);
498 gdb_assert (child->priv != NULL
499 && child->priv->arch_private != NULL);
500
501 /* Linux kernels before 2.6.33 commit
502 72f674d203cd230426437cdcf7dd6f681dad8b0d
503 will inherit hardware debug registers from the parent
504 on fork/vfork/clone. Newer Linux kernels create such tasks with
505 zeroed debug registers.
506
507 GDB core assumes the child inherits the watchpoints/hw
508 breakpoints of the parent, and will remove them all from the
509 forked off process. Copy the debug register mirrors into the
510 new process so that all breakpoints and watchpoints can be
511 removed together. The debug register mirrors will be zeroed
512 in the end before detaching the forked off process, thus making
513 this compatible with older Linux kernels too. */
514
515 *child->priv->arch_private = *parent->priv->arch_private;
516 }
517
518 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
519 #define AARCH64_HWCAP_PACA (1 << 30)
520
521 /* Implementation of linux target ops method "low_arch_setup". */
522
523 void
524 aarch64_target::low_arch_setup ()
525 {
526 unsigned int machine;
527 int is_elf64;
528 int tid;
529
530 tid = lwpid_of (current_thread);
531
532 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
533
534 if (is_elf64)
535 {
536 uint64_t vq = aarch64_sve_get_vq (tid);
537 unsigned long hwcap = linux_get_hwcap (8);
538 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
539
540 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
541 }
542 else
543 current_process ()->tdesc = aarch32_linux_read_description ();
544
545 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
546 }
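/* Note: VQ is the SVE vector length expressed in 128-bit quadwords; for
   example, a 512-bit vector length gives VQ == 4, and a VQ of zero
   indicates that SVE is not available on this thread. */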
547
548 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
549
550 static void
551 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
552 {
553 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
554 }
555
556 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
557
558 static void
559 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
560 {
561 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
562 }
563
564 static struct regset_info aarch64_regsets[] =
565 {
566 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
567 sizeof (struct user_pt_regs), GENERAL_REGS,
568 aarch64_fill_gregset, aarch64_store_gregset },
569 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
570 sizeof (struct user_fpsimd_state), FP_REGS,
571 aarch64_fill_fpregset, aarch64_store_fpregset
572 },
573 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
574 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
575 NULL, aarch64_store_pauthregset },
576 NULL_REGSET
577 };
578
579 static struct regsets_info aarch64_regsets_info =
580 {
581 aarch64_regsets, /* regsets */
582 0, /* num_regsets */
583 NULL, /* disabled_regsets */
584 };
585
586 static struct regs_info regs_info_aarch64 =
587 {
588 NULL, /* regset_bitmap */
589 NULL, /* usrregs */
590 &aarch64_regsets_info,
591 };
592
593 static struct regset_info aarch64_sve_regsets[] =
594 {
595 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
596 sizeof (struct user_pt_regs), GENERAL_REGS,
597 aarch64_fill_gregset, aarch64_store_gregset },
598 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
599 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
600 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
601 },
602 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
603 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
604 NULL, aarch64_store_pauthregset },
605 NULL_REGSET
606 };
607
608 static struct regsets_info aarch64_sve_regsets_info =
609 {
610 aarch64_sve_regsets, /* regsets. */
611 0, /* num_regsets. */
612 NULL, /* disabled_regsets. */
613 };
614
615 static struct regs_info regs_info_aarch64_sve =
616 {
617 NULL, /* regset_bitmap. */
618 NULL, /* usrregs. */
619 &aarch64_sve_regsets_info,
620 };
621
622 /* Implementation of linux_target_ops method "regs_info". */
623
624 static const struct regs_info *
625 aarch64_regs_info (void)
626 {
627 if (!is_64bit_tdesc ())
628 return &regs_info_aarch32;
629
630 if (is_sve_tdesc ())
631 return &regs_info_aarch64_sve;
632
633 return &regs_info_aarch64;
634 }
635
636 /* Implementation of linux_target_ops method "supports_tracepoints". */
637
638 static int
639 aarch64_supports_tracepoints (void)
640 {
641 if (current_thread == NULL)
642 return 1;
643 else
644 {
645 /* We don't support tracepoints on aarch32 now. */
646 return is_64bit_tdesc ();
647 }
648 }
649
650 /* Implementation of linux_target_ops method "get_thread_area". */
651
652 static int
653 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
654 {
655 struct iovec iovec;
656 uint64_t reg;
657
658 iovec.iov_base = &reg;
659 iovec.iov_len = sizeof (reg);
660
661 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
662 return -1;
663
664 *addrp = reg;
665
666 return 0;
667 }
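/* Note: on AArch64 the NT_ARM_TLS regset holds the TPIDR_EL0 software
   thread ID register, so the 8-byte value read above is the thread
   pointer that libthread_db expects as the thread area base. */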
668
669 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
670
671 static void
672 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
673 {
674 int use_64bit = register_size (regcache->tdesc, 0) == 8;
675
676 if (use_64bit)
677 {
678 long l_sysno;
679
680 collect_register_by_name (regcache, "x8", &l_sysno);
681 *sysno = (int) l_sysno;
682 }
683 else
684 collect_register_by_name (regcache, "r7", sysno);
685 }
686
687 /* List of condition codes that we need. */
688
689 enum aarch64_condition_codes
690 {
691 EQ = 0x0,
692 NE = 0x1,
693 LO = 0x3,
694 GE = 0xa,
695 LT = 0xb,
696 GT = 0xc,
697 LE = 0xd,
698 };
699
700 enum aarch64_operand_type
701 {
702 OPERAND_IMMEDIATE,
703 OPERAND_REGISTER,
704 };
705
706 /* Representation of an operand. At this time, it only supports register
707 and immediate types. */
708
709 struct aarch64_operand
710 {
711 /* Type of the operand. */
712 enum aarch64_operand_type type;
713
714 /* Value of the operand according to the type. */
715 union
716 {
717 uint32_t imm;
718 struct aarch64_register reg;
719 };
720 };
721
722 /* List of registers that we are currently using; we can add more here as
723 we need them. */
724
725 /* General purpose scratch registers (64 bit). */
726 static const struct aarch64_register x0 = { 0, 1 };
727 static const struct aarch64_register x1 = { 1, 1 };
728 static const struct aarch64_register x2 = { 2, 1 };
729 static const struct aarch64_register x3 = { 3, 1 };
730 static const struct aarch64_register x4 = { 4, 1 };
731
732 /* General purpose scratch registers (32 bit). */
733 static const struct aarch64_register w0 = { 0, 0 };
734 static const struct aarch64_register w2 = { 2, 0 };
735
736 /* Intra-procedure scratch registers. */
737 static const struct aarch64_register ip0 = { 16, 1 };
738
739 /* Special purpose registers. */
740 static const struct aarch64_register fp = { 29, 1 };
741 static const struct aarch64_register lr = { 30, 1 };
742 static const struct aarch64_register sp = { 31, 1 };
743 static const struct aarch64_register xzr = { 31, 1 };
744
745 /* Build a register descriptor at run time. If we know the register
746 statically, we should make it a global as above instead of using this
747 helper function. */
748
749 static struct aarch64_register
750 aarch64_register (unsigned num, int is64)
751 {
752 return (struct aarch64_register) { num, is64 };
753 }
754
755 /* Helper function to create a register operand, for instructions with
756 different types of operands.
757
758 For example:
759 p += emit_mov (p, x0, register_operand (x1)); */
760
761 static struct aarch64_operand
762 register_operand (struct aarch64_register reg)
763 {
764 struct aarch64_operand operand;
765
766 operand.type = OPERAND_REGISTER;
767 operand.reg = reg;
768
769 return operand;
770 }
771
772 /* Helper function to create an immediate operand, for instructions with
773 different types of operands.
774
775 For example:
776 p += emit_mov (p, x0, immediate_operand (12)); */
777
778 static struct aarch64_operand
779 immediate_operand (uint32_t imm)
780 {
781 struct aarch64_operand operand;
782
783 operand.type = OPERAND_IMMEDIATE;
784 operand.imm = imm;
785
786 return operand;
787 }
788
789 /* Helper function to create an offset memory operand.
790
791 For example:
792 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
793
794 static struct aarch64_memory_operand
795 offset_memory_operand (int32_t offset)
796 {
797 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
798 }
799
800 /* Helper function to create a pre-index memory operand.
801
802 For example:
803 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
804
805 static struct aarch64_memory_operand
806 preindex_memory_operand (int32_t index)
807 {
808 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
809 }
810
811 /* Helper function to create a post-index memory operand.
812
813 For example:
814 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
815
816 static struct aarch64_memory_operand
817 postindex_memory_operand (int32_t index)
818 {
819 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
820 }
821
822 /* System control registers. These special registers can be read and
823 written with the MRS and MSR instructions.
824
825 - NZCV: Condition flags. GDB refers to this register under the CPSR
826 name.
827 - FPSR: Floating-point status register.
828 - FPCR: Floating-point control register.
829 - TPIDR_EL0: Software thread ID register. */
830
831 enum aarch64_system_control_registers
832 {
833 /* op0 op1 crn crm op2 */
834 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
835 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
836 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
837 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
838 };
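/* For example, NZCV above corresponds to the system register
   S3_3_C4_C2_0 (op0=3 encoded as o0=1, op1=3, CRn=4, CRm=2, op2=0);
   passing it to emit_mrs below therefore assembles to "MRS xt, NZCV". */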
839
840 /* Write a BLR instruction into *BUF.
841
842 BLR rn
843
844 RN is the register to branch to. */
845
846 static int
847 emit_blr (uint32_t *buf, struct aarch64_register rn)
848 {
849 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
850 }
851
852 /* Write a RET instruction into *BUF.
853
854 RET xn
855
856 RN is the register to branch to. */
857
858 static int
859 emit_ret (uint32_t *buf, struct aarch64_register rn)
860 {
861 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
862 }
863
864 static int
865 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
866 struct aarch64_register rt,
867 struct aarch64_register rt2,
868 struct aarch64_register rn,
869 struct aarch64_memory_operand operand)
870 {
871 uint32_t opc;
872 uint32_t pre_index;
873 uint32_t write_back;
874
875 if (rt.is64)
876 opc = ENCODE (2, 2, 30);
877 else
878 opc = ENCODE (0, 2, 30);
879
880 switch (operand.type)
881 {
882 case MEMORY_OPERAND_OFFSET:
883 {
884 pre_index = ENCODE (1, 1, 24);
885 write_back = ENCODE (0, 1, 23);
886 break;
887 }
888 case MEMORY_OPERAND_POSTINDEX:
889 {
890 pre_index = ENCODE (0, 1, 24);
891 write_back = ENCODE (1, 1, 23);
892 break;
893 }
894 case MEMORY_OPERAND_PREINDEX:
895 {
896 pre_index = ENCODE (1, 1, 24);
897 write_back = ENCODE (1, 1, 23);
898 break;
899 }
900 default:
901 return 0;
902 }
903
904 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
905 | ENCODE (operand.index >> 3, 7, 15)
906 | ENCODE (rt2.num, 5, 10)
907 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
908 }
909
910 /* Write a STP instruction into *BUF.
911
912 STP rt, rt2, [rn, #offset]
913 STP rt, rt2, [rn, #index]!
914 STP rt, rt2, [rn], #index
915
916 RT and RT2 are the registers to store.
917 RN is the base address register.
918 OFFSET is the immediate to add to the base address. It is limited to a
919 -512 .. 504 range (7 bits << 3). */
920
921 static int
922 emit_stp (uint32_t *buf, struct aarch64_register rt,
923 struct aarch64_register rt2, struct aarch64_register rn,
924 struct aarch64_memory_operand operand)
925 {
926 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
927 }
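/* For example, emit_stp (p, fp, lr, sp, preindex_memory_operand (-16))
   emits "STP x29, x30, [sp, #-16]!", the usual frame-building idiom. */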
928
929 /* Write a LDP instruction into *BUF.
930
931 LDP rt, rt2, [rn, #offset]
932 LDP rt, rt2, [rn, #index]!
933 LDP rt, rt2, [rn], #index
934
935 RT and RT2 are the registers to load.
936 RN is the base address register.
937 OFFSET is the immediate to add to the base address. It is limited to a
938 -512 .. 504 range (7 bits << 3). */
939
940 static int
941 emit_ldp (uint32_t *buf, struct aarch64_register rt,
942 struct aarch64_register rt2, struct aarch64_register rn,
943 struct aarch64_memory_operand operand)
944 {
945 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
946 }
947
948 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
949
950 LDP qt, qt2, [rn, #offset]
951
952 RT and RT2 are the Q registers to load.
953 RN is the base address register.
954 OFFSET is the immediate to add to the base address. It is limited to
955 -1024 .. 1008 range (7 bits << 4). */
956
957 static int
958 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
959 struct aarch64_register rn, int32_t offset)
960 {
961 uint32_t opc = ENCODE (2, 2, 30);
962 uint32_t pre_index = ENCODE (1, 1, 24);
963
964 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
965 | ENCODE (offset >> 4, 7, 15)
966 | ENCODE (rt2, 5, 10)
967 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
968 }
969
970 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
971
972 STP qt, qt2, [rn, #offset]
973
974 RT and RT2 are the Q registers to store.
975 RN is the base address register.
976 OFFSET is the immediate to add to the base address. It is limited to
977 -1024 .. 1008 range (7 bits << 4). */
978
979 static int
980 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
981 struct aarch64_register rn, int32_t offset)
982 {
983 uint32_t opc = ENCODE (2, 2, 30);
984 uint32_t pre_index = ENCODE (1, 1, 24);
985
986 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
987 | ENCODE (offset >> 4, 7, 15)
988 | ENCODE (rt2, 5, 10)
989 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
990 }
991
992 /* Write a LDRH instruction into *BUF.
993
994 LDRH wt, [xn, #offset]
995 LDRH wt, [xn, #index]!
996 LDRH wt, [xn], #index
997
998 RT is the register to load into.
999 RN is the base address register.
1000 OFFSET is the immediate to add to the base address. It is limited to
1001 the 0 .. 8190 range (12 bits << 1). */
1002
1003 static int
1004 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1005 struct aarch64_register rn,
1006 struct aarch64_memory_operand operand)
1007 {
1008 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1009 }
1010
1011 /* Write a LDRB instruction into *BUF.
1012
1013 LDRB wt, [xn, #offset]
1014 LDRB wt, [xn, #index]!
1015 LDRB wt, [xn], #index
1016
1017 RT is the register to load into.
1018 RN is the base address register.
1019 OFFSET is the immediate to add to the base address. It is limited to
1020 the 0 .. 4095 range (12 bits << 0). */
1021
1022 static int
1023 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1024 struct aarch64_register rn,
1025 struct aarch64_memory_operand operand)
1026 {
1027 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1028 }
1029
1030
1031
1032 /* Write a STR instruction into *BUF.
1033
1034 STR rt, [rn, #offset]
1035 STR rt, [rn, #index]!
1036 STR rt, [rn], #index
1037
1038 RT is the register to store.
1039 RN is the base address register.
1040 OFFSET is the immediate to add to the base address. It is limited to
1041 0 .. 32760 range (12 bits << 3). */
1042
1043 static int
1044 emit_str (uint32_t *buf, struct aarch64_register rt,
1045 struct aarch64_register rn,
1046 struct aarch64_memory_operand operand)
1047 {
1048 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1049 }
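/* For example, emit_str (p, x0, sp, offset_memory_operand (16)) emits
   "STR x0, [sp, #16]". */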
1050
1051 /* Helper function emitting an exclusive load or store instruction. */
1052
1053 static int
1054 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1055 enum aarch64_opcodes opcode,
1056 struct aarch64_register rs,
1057 struct aarch64_register rt,
1058 struct aarch64_register rt2,
1059 struct aarch64_register rn)
1060 {
1061 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1062 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1063 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1064 }
1065
1066 /* Write a LDAXR instruction into *BUF.
1067
1068 LDAXR rt, [xn]
1069
1070 RT is the destination register.
1071 RN is the base address register. */
1072
1073 static int
1074 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1075 struct aarch64_register rn)
1076 {
1077 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1078 xzr, rn);
1079 }
1080
1081 /* Write a STXR instruction into *BUF.
1082
1083 STXR ws, rt, [xn]
1084
1085 RS is the result register; it indicates whether the store succeeded.
1086 RT is the register to store.
1087 RN is the base address register. */
1088
1089 static int
1090 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1091 struct aarch64_register rt, struct aarch64_register rn)
1092 {
1093 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1094 xzr, rn);
1095 }
1096
1097 /* Write a STLR instruction into *BUF.
1098
1099 STLR rt, [xn]
1100
1101 RT is the register to store.
1102 RN is the base address register. */
1103
1104 static int
1105 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1106 struct aarch64_register rn)
1107 {
1108 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1109 xzr, rn);
1110 }
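/* Taken together, the exclusive load/store helpers above can implement a
   simple lock-acquisition loop.  Purely as an illustrative sketch (not a
   sequence emitted verbatim here):

     again:
       LDAXR x1, [x0]       ; load-acquire the lock word
       CMP   x1, #0
       B.NE  again          ; lock already held, retry
       STXR  w1, x2, [x0]   ; try to claim it
       CBNZ  w1, again      ; exclusive store failed, retry
*/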
1111
1112 /* Helper function for data processing instructions with register sources. */
1113
1114 static int
1115 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1116 struct aarch64_register rd,
1117 struct aarch64_register rn,
1118 struct aarch64_register rm)
1119 {
1120 uint32_t size = ENCODE (rd.is64, 1, 31);
1121
1122 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1123 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1124 }
1125
1126 /* Helper function for data processing instructions taking either a register
1127 or an immediate. */
1128
1129 static int
1130 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1131 struct aarch64_register rd,
1132 struct aarch64_register rn,
1133 struct aarch64_operand operand)
1134 {
1135 uint32_t size = ENCODE (rd.is64, 1, 31);
1136 /* The opcode is different for register and immediate source operands. */
1137 uint32_t operand_opcode;
1138
1139 if (operand.type == OPERAND_IMMEDIATE)
1140 {
1141 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1142 operand_opcode = ENCODE (8, 4, 25);
1143
1144 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1145 | ENCODE (operand.imm, 12, 10)
1146 | ENCODE (rn.num, 5, 5)
1147 | ENCODE (rd.num, 5, 0));
1148 }
1149 else
1150 {
1151 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1152 operand_opcode = ENCODE (5, 4, 25);
1153
1154 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1155 rn, operand.reg);
1156 }
1157 }
1158
1159 /* Write an ADD instruction into *BUF.
1160
1161 ADD rd, rn, #imm
1162 ADD rd, rn, rm
1163
1164 This function handles both an immediate and register add.
1165
1166 RD is the destination register.
1167 RN is the input register.
1168 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1169 OPERAND_REGISTER. */
1170
1171 static int
1172 emit_add (uint32_t *buf, struct aarch64_register rd,
1173 struct aarch64_register rn, struct aarch64_operand operand)
1174 {
1175 return emit_data_processing (buf, ADD, rd, rn, operand);
1176 }
1177
1178 /* Write a SUB instruction into *BUF.
1179
1180 SUB rd, rn, #imm
1181 SUB rd, rn, rm
1182
1183 This function handles both an immediate and register sub.
1184
1185 RD is the destination register.
1186 RN is the input register.
1187 OPERAND is the immediate or register to subtract from RN. */
1188
1189 static int
1190 emit_sub (uint32_t *buf, struct aarch64_register rd,
1191 struct aarch64_register rn, struct aarch64_operand operand)
1192 {
1193 return emit_data_processing (buf, SUB, rd, rn, operand);
1194 }
1195
1196 /* Write a MOV instruction into *BUF.
1197
1198 MOV rd, #imm
1199 MOV rd, rm
1200
1201 This function handles both a wide immediate move and a register move,
1202 with the condition that the source register is not xzr. xzr and the
1203 stack pointer share the same encoding and this function only supports
1204 the stack pointer.
1205
1206 RD is the destination register.
1207 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1208 OPERAND_REGISTER. */
1209
1210 static int
1211 emit_mov (uint32_t *buf, struct aarch64_register rd,
1212 struct aarch64_operand operand)
1213 {
1214 if (operand.type == OPERAND_IMMEDIATE)
1215 {
1216 uint32_t size = ENCODE (rd.is64, 1, 31);
1217 /* Do not shift the immediate. */
1218 uint32_t shift = ENCODE (0, 2, 21);
1219
1220 return aarch64_emit_insn (buf, MOV | size | shift
1221 | ENCODE (operand.imm, 16, 5)
1222 | ENCODE (rd.num, 5, 0));
1223 }
1224 else
1225 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1226 }
1227
1228 /* Write a MOVK instruction into *BUF.
1229
1230 MOVK rd, #imm, lsl #shift
1231
1232 RD is the destination register.
1233 IMM is the immediate.
1234 SHIFT is the logical shift left to apply to IMM. */
1235
1236 static int
1237 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1238 unsigned shift)
1239 {
1240 uint32_t size = ENCODE (rd.is64, 1, 31);
1241
1242 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1243 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1244 }
1245
1246 /* Write instructions into *BUF in order to move ADDR into a register.
1247 ADDR can be a 64-bit value.
1248
1249 This function will emit a series of MOV and MOVK instructions, such as:
1250
1251 MOV xd, #(addr)
1252 MOVK xd, #(addr >> 16), lsl #16
1253 MOVK xd, #(addr >> 32), lsl #32
1254 MOVK xd, #(addr >> 48), lsl #48 */
1255
1256 static int
1257 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1258 {
1259 uint32_t *p = buf;
1260
1261 /* The MOV (wide immediate) instruction clears the top bits of the
1262 register. */
1263 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1264
1265 if ((addr >> 16) != 0)
1266 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1267 else
1268 return p - buf;
1269
1270 if ((addr >> 32) != 0)
1271 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1272 else
1273 return p - buf;
1274
1275 if ((addr >> 48) != 0)
1276 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1277
1278 return p - buf;
1279 }
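/* For example, for ADDR == 0x0000aaaabbbbcccc this emits:

     MOV  xd, #0xcccc
     MOVK xd, #0xbbbb, lsl #16
     MOVK xd, #0xaaaa, lsl #32

   and stops there because the top 16 bits of ADDR are zero. */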
1280
1281 /* Write a SUBS instruction into *BUF.
1282
1283 SUBS rd, rn, rm
1284
1285 This instruction updates the condition flags.
1286
1287 RD is the destination register.
1288 RN is the first source register and OPERAND is the second source operand. */
1289
1290 static int
1291 emit_subs (uint32_t *buf, struct aarch64_register rd,
1292 struct aarch64_register rn, struct aarch64_operand operand)
1293 {
1294 return emit_data_processing (buf, SUBS, rd, rn, operand);
1295 }
1296
1297 /* Write a CMP instruction into *BUF.
1298
1299 CMP rn, rm
1300
1301 This instruction is an alias of SUBS xzr, rn, rm.
1302
1303 RN and RM are the registers to compare. */
1304
1305 static int
1306 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1307 struct aarch64_operand operand)
1308 {
1309 return emit_subs (buf, xzr, rn, operand);
1310 }
1311
1312 /* Write an AND instruction into *BUF.
1313
1314 AND rd, rn, rm
1315
1316 RD is the destination register.
1317 RN and RM are the source registers. */
1318
1319 static int
1320 emit_and (uint32_t *buf, struct aarch64_register rd,
1321 struct aarch64_register rn, struct aarch64_register rm)
1322 {
1323 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1324 }
1325
1326 /* Write an ORR instruction into *BUF.
1327
1328 ORR rd, rn, rm
1329
1330 RD is the destination register.
1331 RN and RM are the source registers. */
1332
1333 static int
1334 emit_orr (uint32_t *buf, struct aarch64_register rd,
1335 struct aarch64_register rn, struct aarch64_register rm)
1336 {
1337 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1338 }
1339
1340 /* Write an ORN instruction into *BUF.
1341
1342 ORN rd, rn, rm
1343
1344 RD is the destination register.
1345 RN and RM are the source registers. */
1346
1347 static int
1348 emit_orn (uint32_t *buf, struct aarch64_register rd,
1349 struct aarch64_register rn, struct aarch64_register rm)
1350 {
1351 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1352 }
1353
1354 /* Write an EOR instruction into *BUF.
1355
1356 EOR rd, rn, rm
1357
1358 RD is the destination register.
1359 RN and RM are the source registers. */
1360
1361 static int
1362 emit_eor (uint32_t *buf, struct aarch64_register rd,
1363 struct aarch64_register rn, struct aarch64_register rm)
1364 {
1365 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1366 }
1367
1368 /* Write a MVN instruction into *BUF.
1369
1370 MVN rd, rm
1371
1372 This is an alias for ORN rd, xzr, rm.
1373
1374 RD is the destination register.
1375 RM is the source register. */
1376
1377 static int
1378 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1379 struct aarch64_register rm)
1380 {
1381 return emit_orn (buf, rd, xzr, rm);
1382 }
1383
1384 /* Write a LSLV instruction into *BUF.
1385
1386 LSLV rd, rn, rm
1387
1388 RD is the destination register.
1389 RN and RM are the source registers. */
1390
1391 static int
1392 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1393 struct aarch64_register rn, struct aarch64_register rm)
1394 {
1395 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1396 }
1397
1398 /* Write a LSRV instruction into *BUF.
1399
1400 LSRV rd, rn, rm
1401
1402 RD is the destination register.
1403 RN and RM are the source registers. */
1404
1405 static int
1406 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1407 struct aarch64_register rn, struct aarch64_register rm)
1408 {
1409 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1410 }
1411
1412 /* Write an ASRV instruction into *BUF.
1413
1414 ASRV rd, rn, rm
1415
1416 RD is the destination register.
1417 RN and RM are the source registers. */
1418
1419 static int
1420 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1421 struct aarch64_register rn, struct aarch64_register rm)
1422 {
1423 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1424 }
1425
1426 /* Write a MUL instruction into *BUF.
1427
1428 MUL rd, rn, rm
1429
1430 RD is the destination register.
1431 RN and RM are the source registers. */
1432
1433 static int
1434 emit_mul (uint32_t *buf, struct aarch64_register rd,
1435 struct aarch64_register rn, struct aarch64_register rm)
1436 {
1437 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1438 }
1439
1440 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1441
1442 MRS xt, system_reg
1443
1444 RT is the destination register.
1445 SYSTEM_REG is the special purpose register to read. */
1446
1447 static int
1448 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1449 enum aarch64_system_control_registers system_reg)
1450 {
1451 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1452 | ENCODE (rt.num, 5, 0));
1453 }
1454
1455 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1456
1457 MSR system_reg, xt
1458
1459 SYSTEM_REG is the special purpose register to write.
1460 RT is the input register. */
1461
1462 static int
1463 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1464 struct aarch64_register rt)
1465 {
1466 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1467 | ENCODE (rt.num, 5, 0));
1468 }
1469
1470 /* Write a SEVL instruction into *BUF.
1471
1472 This is a hint instruction telling the hardware to trigger an event. */
1473
1474 static int
1475 emit_sevl (uint32_t *buf)
1476 {
1477 return aarch64_emit_insn (buf, SEVL);
1478 }
1479
1480 /* Write a WFE instruction into *BUF.
1481
1482 This is a hint instruction telling the hardware to wait for an event. */
1483
1484 static int
1485 emit_wfe (uint32_t *buf)
1486 {
1487 return aarch64_emit_insn (buf, WFE);
1488 }
1489
1490 /* Write a SBFM instruction into *BUF.
1491
1492 SBFM rd, rn, #immr, #imms
1493
1494 This instruction moves the bits from #immr to #imms into the
1495 destination, sign extending the result.
1496
1497 RD is the destination register.
1498 RN is the source register.
1499 IMMR is the bit number to start at (least significant bit).
1500 IMMS is the bit number to stop at (most significant bit). */
1501
1502 static int
1503 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1504 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1505 {
1506 uint32_t size = ENCODE (rd.is64, 1, 31);
1507 uint32_t n = ENCODE (rd.is64, 1, 22);
1508
1509 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1510 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1511 | ENCODE (rd.num, 5, 0));
1512 }
1513
1514 /* Write a SBFX instruction into *BUF.
1515
1516 SBFX rd, rn, #lsb, #width
1517
1518 This instruction moves #width bits from #lsb into the destination, sign
1519 extending the result. This is an alias for:
1520
1521 SBFM rd, rn, #lsb, #(lsb + width - 1)
1522
1523 RD is the destination register.
1524 RN is the source register.
1525 LSB is the bit number to start at (least significant bit).
1526 WIDTH is the number of bits to move. */
1527
1528 static int
1529 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1530 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1531 {
1532 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1533 }
1534
1535 /* Write a UBFM instruction into *BUF.
1536
1537 UBFM rd, rn, #immr, #imms
1538
1539 This instruction moves the bits from #immr to #imms into the
1540 destination, extending the result with zeros.
1541
1542 RD is the destination register.
1543 RN is the source register.
1544 IMMR is the bit number to start at (least significant bit).
1545 IMMS is the bit number to stop at (most significant bit). */
1546
1547 static int
1548 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1549 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1550 {
1551 uint32_t size = ENCODE (rd.is64, 1, 31);
1552 uint32_t n = ENCODE (rd.is64, 1, 22);
1553
1554 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1555 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1556 | ENCODE (rd.num, 5, 0));
1557 }
1558
1559 /* Write a UBFX instruction into *BUF.
1560
1561 UBFX rd, rn, #lsb, #width
1562
1563 This instruction moves #width bits from #lsb into the destination,
1564 extending the result with zeros. This is an alias for:
1565
1566 UBFM rd, rn, #lsb, #(lsb + width - 1)
1567
1568 RD is the destination register.
1569 RN is the source register.
1570 LSB is the bit number to start at (least significant bit).
1571 WIDTH is the number of bits to move. */
1572
1573 static int
1574 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1575 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1576 {
1577 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1578 }
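/* For example, emit_ubfx (p, w2, w0, 0, 8) extracts the low byte of w0
   into w2 ("UBFX w2, w0, #0, #8"). */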
1579
1580 /* Write a CSINC instruction into *BUF.
1581
1582 CSINC rd, rn, rm, cond
1583
1584 This instruction places RN in RD if the condition is true, and RM plus
1585 one in RD otherwise.
1586
1587 RD is the destination register.
1588 RN and RM are the source registers.
1589 COND is the encoded condition. */
1590
1591 static int
1592 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1593 struct aarch64_register rn, struct aarch64_register rm,
1594 unsigned cond)
1595 {
1596 uint32_t size = ENCODE (rd.is64, 1, 31);
1597
1598 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1599 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1600 | ENCODE (rd.num, 5, 0));
1601 }
1602
1603 /* Write a CSET instruction into *BUF.
1604
1605 CSET rd, cond
1606
1607 This instruction conditionally writes 1 or 0 in the destination register.
1608 1 is written if the condition is true. This is an alias for:
1609
1610 CSINC rd, xzr, xzr, !cond
1611
1612 Note that the condition needs to be inverted.
1613
1614 RD is the destination register.
1616 COND is the encoded condition. */
1617
1618 static int
1619 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1620 {
1621 /* The least significant bit of the condition needs toggling in order to
1622 invert it. */
1623 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1624 }
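/* For example, emit_cset (p, x0, EQ) emits "CSET x0, EQ", which writes 1
   to x0 if the Z flag is set and 0 otherwise. */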
1625
1626 /* Write LEN instructions from BUF into the inferior memory at *TO.
1627
1628 Note instructions are always little endian on AArch64, unlike data. */
1629
1630 static void
1631 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1632 {
1633 size_t byte_len = len * sizeof (uint32_t);
1634 #if (__BYTE_ORDER == __BIG_ENDIAN)
1635 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1636 size_t i;
1637
1638 for (i = 0; i < len; i++)
1639 le_buf[i] = htole32 (buf[i]);
1640
1641 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1642
1643 xfree (le_buf);
1644 #else
1645 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1646 #endif
1647
1648 *to += byte_len;
1649 }
1650
1651 /* Sub-class of struct aarch64_insn_data, store information of
1652 instruction relocation for fast tracepoint. Visitor can
1653 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1654 the relocated instructions in buffer pointed by INSN_PTR. */
1655
1656 struct aarch64_insn_relocation_data
1657 {
1658 struct aarch64_insn_data base;
1659
1660 /* The new address the instruction is relocated to. */
1661 CORE_ADDR new_addr;
1662 /* Pointer to the buffer of relocated instruction(s). */
1663 uint32_t *insn_ptr;
1664 };
1665
1666 /* Implementation of aarch64_insn_visitor method "b". */
1667
1668 static void
1669 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1670 struct aarch64_insn_data *data)
1671 {
1672 struct aarch64_insn_relocation_data *insn_reloc
1673 = (struct aarch64_insn_relocation_data *) data;
1674 int64_t new_offset
1675 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1676
1677 if (can_encode_int32 (new_offset, 28))
1678 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1679 }
1680
1681 /* Implementation of aarch64_insn_visitor method "b_cond". */
1682
1683 static void
1684 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1685 struct aarch64_insn_data *data)
1686 {
1687 struct aarch64_insn_relocation_data *insn_reloc
1688 = (struct aarch64_insn_relocation_data *) data;
1689 int64_t new_offset
1690 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1691
1692 if (can_encode_int32 (new_offset, 21))
1693 {
1694 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1695 new_offset);
1696 }
1697 else if (can_encode_int32 (new_offset, 28))
1698 {
1699 /* The offset is out of range for a conditional branch
1700 instruction but not for an unconditional branch. We can use
1701 the following instructions instead:
1702
1703 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1704 B NOT_TAKEN ; Else jump over TAKEN and continue.
1705 TAKEN:
1706 B #(offset - 8)
1707 NOT_TAKEN:
1708
1709 */
1710
1711 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1712 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1713 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1714 }
1715 }
1716
1717 /* Implementation of aarch64_insn_visitor method "cb". */
1718
1719 static void
1720 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1721 const unsigned rn, int is64,
1722 struct aarch64_insn_data *data)
1723 {
1724 struct aarch64_insn_relocation_data *insn_reloc
1725 = (struct aarch64_insn_relocation_data *) data;
1726 int64_t new_offset
1727 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1728
1729 if (can_encode_int32 (new_offset, 21))
1730 {
1731 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1732 aarch64_register (rn, is64), new_offset);
1733 }
1734 else if (can_encode_int32 (new_offset, 28))
1735 {
1736 /* The offset is out of range for a compare and branch
1737 instruction but not for an unconditional branch. We can use
1738 the following instructions instead:
1739
1740 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1741 B NOT_TAKEN ; Else jump over TAKEN and continue.
1742 TAKEN:
1743 B #(offset - 8)
1744 NOT_TAKEN:
1745
1746 */
1747 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1748 aarch64_register (rn, is64), 8);
1749 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1750 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1751 }
1752 }
1753
1754 /* Implementation of aarch64_insn_visitor method "tb". */
1755
1756 static void
1757 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1758 const unsigned rt, unsigned bit,
1759 struct aarch64_insn_data *data)
1760 {
1761 struct aarch64_insn_relocation_data *insn_reloc
1762 = (struct aarch64_insn_relocation_data *) data;
1763 int64_t new_offset
1764 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1765
1766 if (can_encode_int32 (new_offset, 16))
1767 {
1768 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1769 aarch64_register (rt, 1), new_offset);
1770 }
1771 else if (can_encode_int32 (new_offset, 28))
1772 {
1773 /* The offset is out of range for a test bit and branch
1774 instruction but not for an unconditional branch. We can use
1775 the following instructions instead:
1776
1777 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1778 B NOT_TAKEN ; Else jump over TAKEN and continue.
1779 TAKEN:
1780 B #(offset - 8)
1781 NOT_TAKEN:
1782
1783 */
1784 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1785 aarch64_register (rt, 1), 8);
1786 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1787 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1788 new_offset - 8);
1789 }
1790 }
1791
1792 /* Implementation of aarch64_insn_visitor method "adr". */
1793
1794 static void
1795 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1796 const int is_adrp,
1797 struct aarch64_insn_data *data)
1798 {
1799 struct aarch64_insn_relocation_data *insn_reloc
1800 = (struct aarch64_insn_relocation_data *) data;
1801 /* We know exactly the address the ADR{P,} instruction will compute.
1802 We can just write it to the destination register. */
1803 CORE_ADDR address = data->insn_addr + offset;
1804
1805 if (is_adrp)
1806 {
1807 /* Clear the lower 12 bits of the offset to get the 4K page. */
1808 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1809 aarch64_register (rd, 1),
1810 address & ~0xfff);
1811 }
1812 else
1813 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1814 aarch64_register (rd, 1), address);
1815 }
1816
1817 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1818
1819 static void
1820 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1821 const unsigned rt, const int is64,
1822 struct aarch64_insn_data *data)
1823 {
1824 struct aarch64_insn_relocation_data *insn_reloc
1825 = (struct aarch64_insn_relocation_data *) data;
1826 CORE_ADDR address = data->insn_addr + offset;
1827
1828 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1829 aarch64_register (rt, 1), address);
1830
1831 /* We know exactly what address to load from, and what register we
1832 can use:
1833
1834 MOV xd, #(oldloc + offset)
1835 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1836 ...
1837
1838 LDR xd, [xd] ; or LDRSW xd, [xd]
1839
1840 */
1841
1842 if (is_sw)
1843 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1844 aarch64_register (rt, 1),
1845 aarch64_register (rt, 1),
1846 offset_memory_operand (0));
1847 else
1848 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1849 aarch64_register (rt, is64),
1850 aarch64_register (rt, 1),
1851 offset_memory_operand (0));
1852 }
1853
1854 /* Implementation of aarch64_insn_visitor method "others". */
1855
1856 static void
1857 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1858 struct aarch64_insn_data *data)
1859 {
1860 struct aarch64_insn_relocation_data *insn_reloc
1861 = (struct aarch64_insn_relocation_data *) data;
1862
1863 /* The instruction is not PC relative. Just re-emit it at the new
1864 location. */
1865 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1866 }
1867
1868 static const struct aarch64_insn_visitor visitor =
1869 {
1870 aarch64_ftrace_insn_reloc_b,
1871 aarch64_ftrace_insn_reloc_b_cond,
1872 aarch64_ftrace_insn_reloc_cb,
1873 aarch64_ftrace_insn_reloc_tb,
1874 aarch64_ftrace_insn_reloc_adr,
1875 aarch64_ftrace_insn_reloc_ldr_literal,
1876 aarch64_ftrace_insn_reloc_others,
1877 };
1878
1879 /* Implementation of linux_target_ops method
1880 "install_fast_tracepoint_jump_pad". */
1881
1882 static int
1883 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1884 CORE_ADDR tpaddr,
1885 CORE_ADDR collector,
1886 CORE_ADDR lockaddr,
1887 ULONGEST orig_size,
1888 CORE_ADDR *jump_entry,
1889 CORE_ADDR *trampoline,
1890 ULONGEST *trampoline_size,
1891 unsigned char *jjump_pad_insn,
1892 ULONGEST *jjump_pad_insn_size,
1893 CORE_ADDR *adjusted_insn_addr,
1894 CORE_ADDR *adjusted_insn_addr_end,
1895 char *err)
1896 {
1897 uint32_t buf[256];
1898 uint32_t *p = buf;
1899 int64_t offset;
1900 int i;
1901 uint32_t insn;
1902 CORE_ADDR buildaddr = *jump_entry;
1903 struct aarch64_insn_relocation_data insn_data;
1904
1905 /* We need to save the current state on the stack both to restore it
1906 later and to collect register values when the tracepoint is hit.
1907
1908 The saved registers are pushed in a layout that needs to be in sync
1909 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1910 the supply_fast_tracepoint_registers function will fill in the
1911 register cache from a pointer to saved registers on the stack we build
1912 here.
1913
1914 For simplicity, we set the size of each cell on the stack to 16 bytes.
1915 This way one cell can hold any register type, from system registers
1916 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
1917 has to be 16-byte aligned anyway.
1918
1919 Note that the CPSR register does not exist on AArch64. Instead we
1920 can access system bits describing the process state with the
1921 MRS/MSR instructions, namely the condition flags. We save them as
1922 if they are part of a CPSR register because that's how GDB
1923 interprets these system bits. At the moment, only the condition
1924 flags are saved in CPSR (NZCV).
1925
1926 Stack layout, each cell is 16 bytes (descending):
1927
1928 High *-------- SIMD&FP registers from 31 down to 0. --------*
1929 | q31 |
1930 . .
1931 . . 32 cells
1932 . .
1933 | q0 |
1934 *---- General purpose registers from 30 down to 0. ----*
1935 | x30 |
1936 . .
1937 . . 31 cells
1938 . .
1939 | x0 |
1940 *------------- Special purpose registers. -------------*
1941 | SP |
1942 | PC |
1943 | CPSR (NZCV) | 5 cells
1944 | FPSR |
1945 | FPCR | <- SP + 16
1946 *------------- collecting_t object --------------------*
1947 | TPIDR_EL0 | struct tracepoint * |
1948 Low *------------------------------------------------------*
1949
1950 After this stack is set up, we issue a call to the collector, passing
1951 it the saved registers at (SP + 16). */
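/* Summing the cells above (a derived figure, for orientation only):
   32 + 31 + 5 + 1 = 69 cells of 16 bytes each, so the jump pad
   temporarily claims 69 * 16 = 1104 bytes of the inferior's stack.  */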
1952
1953 /* Push SIMD&FP registers on the stack:
1954
1955 SUB sp, sp, #(32 * 16)
1956
1957 STP q30, q31, [sp, #(30 * 16)]
1958 ...
1959 STP q0, q1, [sp]
1960
1961 */
1962 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1963 for (i = 30; i >= 0; i -= 2)
1964 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1965
1966 /* Push general purpose registers on the stack. Note that we do not need
1967 to push x31 as it represents the xzr register and not the stack
1968 pointer in a STR instruction.
1969
1970 SUB sp, sp, #(31 * 16)
1971
1972 STR x30, [sp, #(30 * 16)]
1973 ...
1974 STR x0, [sp]
1975
1976 */
1977 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1978 for (i = 30; i >= 0; i -= 1)
1979 p += emit_str (p, aarch64_register (i, 1), sp,
1980 offset_memory_operand (i * 16));
1981
1982 /* Make space for 5 more cells.
1983
1984 SUB sp, sp, #(5 * 16)
1985
1986 */
1987 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1988
1989
1990 /* Save SP:
1991
1992 ADD x4, sp, #((32 + 31 + 5) * 16)
1993 STR x4, [sp, #(4 * 16)]
1994
1995 */
1996 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1997 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1998
1999 /* Save PC (tracepoint address):
2000
2001 MOV x3, #(tpaddr)
2002 ...
2003
2004 STR x3, [sp, #(3 * 16)]
2005
2006 */
2007
2008 p += emit_mov_addr (p, x3, tpaddr);
2009 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2010
2011 /* Save CPSR (NZCV), FPSR and FPCR:
2012
2013 MRS x2, nzcv
2014 MRS x1, fpsr
2015 MRS x0, fpcr
2016
2017 STR x2, [sp, #(2 * 16)]
2018 STR x1, [sp, #(1 * 16)]
2019 STR x0, [sp, #(0 * 16)]
2020
2021 */
2022 p += emit_mrs (p, x2, NZCV);
2023 p += emit_mrs (p, x1, FPSR);
2024 p += emit_mrs (p, x0, FPCR);
2025 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2026 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2027 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2028
2029 /* Push the collecting_t object. It consists of the address of the
2030 tracepoint and an ID for the current thread. We get the latter by
2031 reading the tpidr_el0 system register. It corresponds to the
2032 NT_ARM_TLS register accessible with ptrace.
2033
2034 MOV x0, #(tpoint)
2035 ...
2036
2037 MRS x1, tpidr_el0
2038
2039 STP x0, x1, [sp, #-16]!
2040
2041 */
2042
2043 p += emit_mov_addr (p, x0, tpoint);
2044 p += emit_mrs (p, x1, TPIDR_EL0);
2045 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2046
2047 /* Spin-lock:
2048
2049 The shared memory for the lock is at lockaddr. It will hold zero
2050 if no-one is holding the lock, otherwise it contains the address of
2051 the collecting_t object on the stack of the thread which acquired it.
2052
2053 At this stage, the stack pointer points to this thread's collecting_t
2054 object.
2055
2056 We use the following registers:
2057 - x0: Address of the lock.
2058 - x1: Pointer to collecting_t object.
2059 - x2: Scratch register.
2060
2061 MOV x0, #(lockaddr)
2062 ...
2063 MOV x1, sp
2064
2065 ; Trigger an event local to this core, so the first WFE below
2066 ; completes immediately instead of waiting.
2067 SEVL
2068 again:
2069 ; Wait for an event. The event is triggered by either the SEVL
2070 ; or STLR instructions (store release).
2071 WFE
2072
2073 ; Atomically read at lockaddr. This marks the memory location as
2074 ; exclusive. This instruction also has memory constraints which
2075 ; make sure all previous data reads and writes are done before
2076 ; executing it.
2077 LDAXR x2, [x0]
2078
2079 ; Try again if another thread holds the lock.
2080 CBNZ x2, again
2081
2082 ; We can lock it! Write the address of the collecting_t object.
2083 ; This instruction will fail if the memory location is not marked
2084 ; as exclusive anymore. If it succeeds, it will remove the
2085 ; exclusive mark on the memory location. This way, if another
2086 ; thread executes this instruction before us, we will fail and try
2087 ; all over again.
2088 STXR w2, x1, [x0]
2089 CBNZ w2, again
2090
2091 */
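/* A rough C equivalent of the lock acquisition emitted below (an
   illustrative sketch using GCC atomic builtins; the real code uses the
   WFE/LDAXR/STXR sequence described above, and "this_collecting_t" is
   just a name for the object we pushed on the stack, i.e. the value of
   SP at this point):

     uintptr_t expected = 0;
     while (!__atomic_compare_exchange_n ((uintptr_t *) lockaddr, &expected,
                                          (uintptr_t) this_collecting_t,
                                          false, __ATOMIC_ACQUIRE,
                                          __ATOMIC_RELAXED))
       expected = 0;        // lock held by another thread; retry
 */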
2092
2093 p += emit_mov_addr (p, x0, lockaddr);
2094 p += emit_mov (p, x1, register_operand (sp));
2095
2096 p += emit_sevl (p);
2097 p += emit_wfe (p);
2098 p += emit_ldaxr (p, x2, x0);
2099 p += emit_cb (p, 1, w2, -2 * 4);
2100 p += emit_stxr (p, w2, x1, x0);
2101 p += emit_cb (p, 1, x2, -4 * 4);
2102
2103 /* Call collector (struct tracepoint *, unsigned char *):
2104
2105 MOV x0, #(tpoint)
2106 ...
2107
2108 ; Saved registers start after the collecting_t object.
2109 ADD x1, sp, #16
2110
2111 ; We use an intra-procedure-call scratch register.
2112 MOV ip0, #(collector)
2113 ...
2114
2115 ; And call back to C!
2116 BLR ip0
2117
2118 */
2119
2120 p += emit_mov_addr (p, x0, tpoint);
2121 p += emit_add (p, x1, sp, immediate_operand (16));
2122
2123 p += emit_mov_addr (p, ip0, collector);
2124 p += emit_blr (p, ip0);
2125
2126 /* Release the lock.
2127
2128 MOV x0, #(lockaddr)
2129 ...
2130
2131 ; This instruction is a normal store with memory ordering
2132 ; constraints. Thanks to this we do not have to put a data
2133 ; barrier instruction to make sure all data reads and writes are done
2134 ; before this instruction is executed. Furthermore, this instruction
2135 ; will trigger an event, letting other threads know they can grab
2136 ; the lock.
2137 STLR xzr, [x0]
2138
2139 */
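/* In GCC builtin terms the release is roughly
   __atomic_store_n ((uintptr_t *) lockaddr, 0, __ATOMIC_RELEASE)
   (an analogy only; the pad emits the MOV/STLR pair just below).  */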
2140 p += emit_mov_addr (p, x0, lockaddr);
2141 p += emit_stlr (p, xzr, x0);
2142
2143 /* Free collecting_t object:
2144
2145 ADD sp, sp, #16
2146
2147 */
2148 p += emit_add (p, sp, sp, immediate_operand (16));
2149
2150 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2151 registers from the stack.
2152
2153 LDR x2, [sp, #(2 * 16)]
2154 LDR x1, [sp, #(1 * 16)]
2155 LDR x0, [sp, #(0 * 16)]
2156
2157 MSR NZCV, x2
2158 MSR FPSR, x1
2159 MSR FPCR, x0
2160
2161 ADD sp, sp, #(5 * 16)
2162
2163 */
2164 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2165 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2166 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2167 p += emit_msr (p, NZCV, x2);
2168 p += emit_msr (p, FPSR, x1);
2169 p += emit_msr (p, FPCR, x0);
2170
2171 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2172
2173 /* Pop general purpose registers:
2174
2175 LDR x0, [sp]
2176 ...
2177 LDR x30, [sp, #(30 * 16)]
2178
2179 ADD sp, sp, #(31 * 16)
2180
2181 */
2182 for (i = 0; i <= 30; i += 1)
2183 p += emit_ldr (p, aarch64_register (i, 1), sp,
2184 offset_memory_operand (i * 16));
2185 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2186
2187 /* Pop SIMD&FP registers:
2188
2189 LDP q0, q1, [sp]
2190 ...
2191 LDP q30, q31, [sp, #(30 * 16)]
2192
2193 ADD sp, sp, #(32 * 16)
2194
2195 */
2196 for (i = 0; i <= 30; i += 2)
2197 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2198 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2199
2200 /* Write the code into the inferior memory. */
2201 append_insns (&buildaddr, p - buf, buf);
2202
2203 /* Now emit the relocated instruction. */
2204 *adjusted_insn_addr = buildaddr;
2205 target_read_uint32 (tpaddr, &insn);
2206
2207 insn_data.base.insn_addr = tpaddr;
2208 insn_data.new_addr = buildaddr;
2209 insn_data.insn_ptr = buf;
2210
2211 aarch64_relocate_instruction (insn, &visitor,
2212 (struct aarch64_insn_data *) &insn_data);
2213
2214 /* We may not have been able to relocate the instruction. */
2215 if (insn_data.insn_ptr == buf)
2216 {
2217 sprintf (err,
2218 "E.Could not relocate instruction from %s to %s.",
2219 core_addr_to_string_nz (tpaddr),
2220 core_addr_to_string_nz (buildaddr));
2221 return 1;
2222 }
2223 else
2224 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2225 *adjusted_insn_addr_end = buildaddr;
2226
2227 /* Go back to the start of the buffer. */
2228 p = buf;
2229
2230 /* Emit a branch back from the jump pad. */
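/* Note on the range checks below: an unconditional B instruction encodes
   a signed 26-bit word offset, i.e. a 28-bit byte offset, so the jump pad
   and the tracepoint must lie within +/- 128 MiB of each other.  */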
2231 offset = (tpaddr + orig_size - buildaddr);
2232 if (!can_encode_int32 (offset, 28))
2233 {
2234 sprintf (err,
2235 "E.Jump back from jump pad too far from tracepoint "
2236 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2237 offset);
2238 return 1;
2239 }
2240
2241 p += emit_b (p, 0, offset);
2242 append_insns (&buildaddr, p - buf, buf);
2243
2244 /* Give the caller a branch instruction into the jump pad. */
2245 offset = (*jump_entry - tpaddr);
2246 if (!can_encode_int32 (offset, 28))
2247 {
2248 sprintf (err,
2249 "E.Jump pad too far from tracepoint "
2250 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2251 offset);
2252 return 1;
2253 }
2254
2255 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2256 *jjump_pad_insn_size = 4;
2257
2258 /* Return the end address of our pad. */
2259 *jump_entry = buildaddr;
2260
2261 return 0;
2262 }
2263
2264 /* Helper function writing LEN instructions from START into
2265 current_insn_ptr. */
2266
2267 static void
2268 emit_ops_insns (const uint32_t *start, int len)
2269 {
2270 CORE_ADDR buildaddr = current_insn_ptr;
2271
2272 if (debug_threads)
2273 debug_printf ("Adding %d instrucions at %s\n",
2274 len, paddress (buildaddr));
2275
2276 append_insns (&buildaddr, len, start);
2277 current_insn_ptr = buildaddr;
2278 }
2279
2280 /* Pop a register from the stack. */
2281
2282 static int
2283 emit_pop (uint32_t *buf, struct aarch64_register rt)
2284 {
2285 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2286 }
2287
2288 /* Push a register on the stack. */
2289
2290 static int
2291 emit_push (uint32_t *buf, struct aarch64_register rt)
2292 {
2293 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2294 }
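/* Both helpers above use 16-byte stack slots.  That is wider than a
   64-bit value needs, but it keeps SP 16-byte aligned, which AArch64
   expects whenever SP is used as the base register of a load or store.  */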
2295
2296 /* Implementation of emit_ops method "emit_prologue". */
2297
2298 static void
2299 aarch64_emit_prologue (void)
2300 {
2301 uint32_t buf[16];
2302 uint32_t *p = buf;
2303
2304 /* This function emits a prologue for the following function prototype:
2305
2306 enum eval_result_type f (unsigned char *regs,
2307 ULONGEST *value);
2308
2309 The first argument is a buffer of raw registers. The second
2310 argument is the result of
2311 evaluating the expression, which will be set to whatever is on top of
2312 the stack at the end.
2313
2314 The stack set up by the prologue is as such:
2315
2316 High *------------------------------------------------------*
2317 | LR |
2318 | FP | <- FP
2319 | x1 (ULONGEST *value) |
2320 | x0 (unsigned char *regs) |
2321 Low *------------------------------------------------------*
2322
2323 As we are implementing a stack machine, each opcode can expand the
2324 stack so we never know how far we are from the data saved by this
2325 prologue. In order to be able to refer to value and regs later, we save
2326 the current stack pointer in the frame pointer. This way, it is not
2327 clobbered when calling C functions.
2328
2329 Finally, throughout every operation, we are using register x0 as the
2330 top of the stack, and x1 as a scratch register. */
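/* Concretely (derived from the stores below): FP ends up pointing just
   above the saved x0/x1 pair, so the raw register buffer can later be
   reloaded as *(FP - 16) and the value pointer as *(FP - 8); see
   aarch64_emit_reg and aarch64_emit_epilogue.  */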
2331
2332 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2333 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2334 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2335
2336 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2337
2338
2339 emit_ops_insns (buf, p - buf);
2340 }
2341
2342 /* Implementation of emit_ops method "emit_epilogue". */
2343
2344 static void
2345 aarch64_emit_epilogue (void)
2346 {
2347 uint32_t buf[16];
2348 uint32_t *p = buf;
2349
2350 /* Store the result of the expression (x0) in *value. */
2351 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2352 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2353 p += emit_str (p, x0, x1, offset_memory_operand (0));
2354
2355 /* Restore the previous state. */
2356 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2357 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2358
2359 /* Return expr_eval_no_error. */
2360 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2361 p += emit_ret (p, lr);
2362
2363 emit_ops_insns (buf, p - buf);
2364 }
2365
2366 /* Implementation of emit_ops method "emit_add". */
2367
2368 static void
2369 aarch64_emit_add (void)
2370 {
2371 uint32_t buf[16];
2372 uint32_t *p = buf;
2373
2374 p += emit_pop (p, x1);
2375 p += emit_add (p, x0, x1, register_operand (x0));
2376
2377 emit_ops_insns (buf, p - buf);
2378 }
2379
2380 /* Implementation of emit_ops method "emit_sub". */
2381
2382 static void
2383 aarch64_emit_sub (void)
2384 {
2385 uint32_t buf[16];
2386 uint32_t *p = buf;
2387
2388 p += emit_pop (p, x1);
2389 p += emit_sub (p, x0, x1, register_operand (x0));
2390
2391 emit_ops_insns (buf, p - buf);
2392 }
2393
2394 /* Implementation of emit_ops method "emit_mul". */
2395
2396 static void
2397 aarch64_emit_mul (void)
2398 {
2399 uint32_t buf[16];
2400 uint32_t *p = buf;
2401
2402 p += emit_pop (p, x1);
2403 p += emit_mul (p, x0, x1, x0);
2404
2405 emit_ops_insns (buf, p - buf);
2406 }
2407
2408 /* Implementation of emit_ops method "emit_lsh". */
2409
2410 static void
2411 aarch64_emit_lsh (void)
2412 {
2413 uint32_t buf[16];
2414 uint32_t *p = buf;
2415
2416 p += emit_pop (p, x1);
2417 p += emit_lslv (p, x0, x1, x0);
2418
2419 emit_ops_insns (buf, p - buf);
2420 }
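/* Note the operand order used by this and the other binary ops: x0 holds
   the top of the stack and x1 the value popped from beneath it, so the
   shift above computes x0 = x1 << x0 (the deeper value shifted by the
   amount that was on top).  */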
2421
2422 /* Implementation of emit_ops method "emit_rsh_signed". */
2423
2424 static void
2425 aarch64_emit_rsh_signed (void)
2426 {
2427 uint32_t buf[16];
2428 uint32_t *p = buf;
2429
2430 p += emit_pop (p, x1);
2431 p += emit_asrv (p, x0, x1, x0);
2432
2433 emit_ops_insns (buf, p - buf);
2434 }
2435
2436 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2437
2438 static void
2439 aarch64_emit_rsh_unsigned (void)
2440 {
2441 uint32_t buf[16];
2442 uint32_t *p = buf;
2443
2444 p += emit_pop (p, x1);
2445 p += emit_lsrv (p, x0, x1, x0);
2446
2447 emit_ops_insns (buf, p - buf);
2448 }
2449
2450 /* Implementation of emit_ops method "emit_ext". */
2451
2452 static void
2453 aarch64_emit_ext (int arg)
2454 {
2455 uint32_t buf[16];
2456 uint32_t *p = buf;
2457
2458 p += emit_sbfx (p, x0, x0, 0, arg);
2459
2460 emit_ops_insns (buf, p - buf);
2461 }
2462
2463 /* Implementation of emit_ops method "emit_log_not". */
2464
2465 static void
2466 aarch64_emit_log_not (void)
2467 {
2468 uint32_t buf[16];
2469 uint32_t *p = buf;
2470
2471 /* If the top of the stack is 0, replace it with 1. Else replace it with
2472 0. */
2473
2474 p += emit_cmp (p, x0, immediate_operand (0));
2475 p += emit_cset (p, x0, EQ);
2476
2477 emit_ops_insns (buf, p - buf);
2478 }
2479
2480 /* Implementation of emit_ops method "emit_bit_and". */
2481
2482 static void
2483 aarch64_emit_bit_and (void)
2484 {
2485 uint32_t buf[16];
2486 uint32_t *p = buf;
2487
2488 p += emit_pop (p, x1);
2489 p += emit_and (p, x0, x0, x1);
2490
2491 emit_ops_insns (buf, p - buf);
2492 }
2493
2494 /* Implementation of emit_ops method "emit_bit_or". */
2495
2496 static void
2497 aarch64_emit_bit_or (void)
2498 {
2499 uint32_t buf[16];
2500 uint32_t *p = buf;
2501
2502 p += emit_pop (p, x1);
2503 p += emit_orr (p, x0, x0, x1);
2504
2505 emit_ops_insns (buf, p - buf);
2506 }
2507
2508 /* Implementation of emit_ops method "emit_bit_xor". */
2509
2510 static void
2511 aarch64_emit_bit_xor (void)
2512 {
2513 uint32_t buf[16];
2514 uint32_t *p = buf;
2515
2516 p += emit_pop (p, x1);
2517 p += emit_eor (p, x0, x0, x1);
2518
2519 emit_ops_insns (buf, p - buf);
2520 }
2521
2522 /* Implementation of emit_ops method "emit_bit_not". */
2523
2524 static void
2525 aarch64_emit_bit_not (void)
2526 {
2527 uint32_t buf[16];
2528 uint32_t *p = buf;
2529
2530 p += emit_mvn (p, x0, x0);
2531
2532 emit_ops_insns (buf, p - buf);
2533 }
2534
2535 /* Implementation of emit_ops method "emit_equal". */
2536
2537 static void
2538 aarch64_emit_equal (void)
2539 {
2540 uint32_t buf[16];
2541 uint32_t *p = buf;
2542
2543 p += emit_pop (p, x1);
2544 p += emit_cmp (p, x0, register_operand (x1));
2545 p += emit_cset (p, x0, EQ);
2546
2547 emit_ops_insns (buf, p - buf);
2548 }
2549
2550 /* Implementation of emit_ops method "emit_less_signed". */
2551
2552 static void
2553 aarch64_emit_less_signed (void)
2554 {
2555 uint32_t buf[16];
2556 uint32_t *p = buf;
2557
2558 p += emit_pop (p, x1);
2559 p += emit_cmp (p, x1, register_operand (x0));
2560 p += emit_cset (p, x0, LT);
2561
2562 emit_ops_insns (buf, p - buf);
2563 }
2564
2565 /* Implementation of emit_ops method "emit_less_unsigned". */
2566
2567 static void
2568 aarch64_emit_less_unsigned (void)
2569 {
2570 uint32_t buf[16];
2571 uint32_t *p = buf;
2572
2573 p += emit_pop (p, x1);
2574 p += emit_cmp (p, x1, register_operand (x0));
2575 p += emit_cset (p, x0, LO);
2576
2577 emit_ops_insns (buf, p - buf);
2578 }
2579
2580 /* Implementation of emit_ops method "emit_ref". */
2581
2582 static void
2583 aarch64_emit_ref (int size)
2584 {
2585 uint32_t buf[16];
2586 uint32_t *p = buf;
2587
2588 switch (size)
2589 {
2590 case 1:
2591 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2592 break;
2593 case 2:
2594 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2595 break;
2596 case 4:
2597 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2598 break;
2599 case 8:
2600 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2601 break;
2602 default:
2603 /* Unknown size, bail on compilation. */
2604 emit_error = 1;
2605 break;
2606 }
2607
2608 emit_ops_insns (buf, p - buf);
2609 }
2610
2611 /* Implementation of emit_ops method "emit_if_goto". */
2612
2613 static void
2614 aarch64_emit_if_goto (int *offset_p, int *size_p)
2615 {
2616 uint32_t buf[16];
2617 uint32_t *p = buf;
2618
2619 /* The Z flag is set or cleared here. */
2620 p += emit_cmp (p, x0, immediate_operand (0));
2621 /* This instruction must not change the Z flag. */
2622 p += emit_pop (p, x0);
2623 /* Branch over the next instruction if x0 == 0. */
2624 p += emit_bcond (p, EQ, 8);
2625
2626 /* The NOP instruction will be patched with an unconditional branch. */
2627 if (offset_p)
2628 *offset_p = (p - buf) * 4;
2629 if (size_p)
2630 *size_p = 4;
2631 p += emit_nop (p);
2632
2633 emit_ops_insns (buf, p - buf);
2634 }
2635
2636 /* Implementation of emit_ops method "emit_goto". */
2637
2638 static void
2639 aarch64_emit_goto (int *offset_p, int *size_p)
2640 {
2641 uint32_t buf[16];
2642 uint32_t *p = buf;
2643
2644 /* The NOP instruction will be patched with an unconditional branch. */
2645 if (offset_p)
2646 *offset_p = 0;
2647 if (size_p)
2648 *size_p = 4;
2649 p += emit_nop (p);
2650
2651 emit_ops_insns (buf, p - buf);
2652 }
2653
2654 /* Implementation of emit_ops method "write_goto_address". */
2655
2656 static void
2657 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2658 {
2659 uint32_t insn;
2660
2661 emit_b (&insn, 0, to - from);
2662 append_insns (&from, 1, &insn);
2663 }
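/* FROM is the address of the NOP placeholder that aarch64_emit_goto or
   aarch64_emit_if_goto recorded via *offset_p, and TO is the eventual
   branch target, so patching is just overwriting that NOP with
   "B (to - from)".  */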
2664
2665 /* Implementation of emit_ops method "emit_const". */
2666
2667 static void
2668 aarch64_emit_const (LONGEST num)
2669 {
2670 uint32_t buf[16];
2671 uint32_t *p = buf;
2672
2673 p += emit_mov_addr (p, x0, num);
2674
2675 emit_ops_insns (buf, p - buf);
2676 }
2677
2678 /* Implementation of emit_ops method "emit_call". */
2679
2680 static void
2681 aarch64_emit_call (CORE_ADDR fn)
2682 {
2683 uint32_t buf[16];
2684 uint32_t *p = buf;
2685
2686 p += emit_mov_addr (p, ip0, fn);
2687 p += emit_blr (p, ip0);
2688
2689 emit_ops_insns (buf, p - buf);
2690 }
2691
2692 /* Implementation of emit_ops method "emit_reg". */
2693
2694 static void
2695 aarch64_emit_reg (int reg)
2696 {
2697 uint32_t buf[16];
2698 uint32_t *p = buf;
2699
2700 /* Set x0 to unsigned char *regs. */
2701 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2702 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2703 p += emit_mov (p, x1, immediate_operand (reg));
2704
2705 emit_ops_insns (buf, p - buf);
2706
2707 aarch64_emit_call (get_raw_reg_func_addr ());
2708 }
2709
2710 /* Implementation of emit_ops method "emit_pop". */
2711
2712 static void
2713 aarch64_emit_pop (void)
2714 {
2715 uint32_t buf[16];
2716 uint32_t *p = buf;
2717
2718 p += emit_pop (p, x0);
2719
2720 emit_ops_insns (buf, p - buf);
2721 }
2722
2723 /* Implementation of emit_ops method "emit_stack_flush". */
2724
2725 static void
2726 aarch64_emit_stack_flush (void)
2727 {
2728 uint32_t buf[16];
2729 uint32_t *p = buf;
2730
2731 p += emit_push (p, x0);
2732
2733 emit_ops_insns (buf, p - buf);
2734 }
2735
2736 /* Implementation of emit_ops method "emit_zero_ext". */
2737
2738 static void
2739 aarch64_emit_zero_ext (int arg)
2740 {
2741 uint32_t buf[16];
2742 uint32_t *p = buf;
2743
2744 p += emit_ubfx (p, x0, x0, 0, arg);
2745
2746 emit_ops_insns (buf, p - buf);
2747 }
2748
2749 /* Implementation of emit_ops method "emit_swap". */
2750
2751 static void
2752 aarch64_emit_swap (void)
2753 {
2754 uint32_t buf[16];
2755 uint32_t *p = buf;
2756
2757 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2758 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2759 p += emit_mov (p, x0, register_operand (x1));
2760
2761 emit_ops_insns (buf, p - buf);
2762 }
2763
2764 /* Implementation of emit_ops method "emit_stack_adjust". */
2765
2766 static void
2767 aarch64_emit_stack_adjust (int n)
2768 {
2769 /* This is not needed with our design. */
2770 uint32_t buf[16];
2771 uint32_t *p = buf;
2772
2773 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2774
2775 emit_ops_insns (buf, p - buf);
2776 }
2777
2778 /* Implementation of emit_ops method "emit_int_call_1". */
2779
2780 static void
2781 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2782 {
2783 uint32_t buf[16];
2784 uint32_t *p = buf;
2785
2786 p += emit_mov (p, x0, immediate_operand (arg1));
2787
2788 emit_ops_insns (buf, p - buf);
2789
2790 aarch64_emit_call (fn);
2791 }
2792
2793 /* Implementation of emit_ops method "emit_void_call_2". */
2794
2795 static void
2796 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2797 {
2798 uint32_t buf[16];
2799 uint32_t *p = buf;
2800
2801 /* Push x0 on the stack. */
2802 aarch64_emit_stack_flush ();
2803
2804 /* Setup arguments for the function call:
2805
2806 x0: arg1
2807 x1: top of the stack
2808
2809 MOV x1, x0
2810 MOV x0, #arg1 */
2811
2812 p += emit_mov (p, x1, register_operand (x0));
2813 p += emit_mov (p, x0, immediate_operand (arg1));
2814
2815 emit_ops_insns (buf, p - buf);
2816
2817 aarch64_emit_call (fn);
2818
2819 /* Restore x0. */
2820 aarch64_emit_pop ();
2821 }
2822
2823 /* Implementation of emit_ops method "emit_eq_goto". */
2824
2825 static void
2826 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2827 {
2828 uint32_t buf[16];
2829 uint32_t *p = buf;
2830
2831 p += emit_pop (p, x1);
2832 p += emit_cmp (p, x1, register_operand (x0));
2833 /* Branch over the next instruction if x0 != x1. */
2834 p += emit_bcond (p, NE, 8);
2835 /* The NOP instruction will be patched with an unconditional branch. */
2836 if (offset_p)
2837 *offset_p = (p - buf) * 4;
2838 if (size_p)
2839 *size_p = 4;
2840 p += emit_nop (p);
2841
2842 emit_ops_insns (buf, p - buf);
2843 }
2844
2845 /* Implementation of emit_ops method "emit_ne_goto". */
2846
2847 static void
2848 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2849 {
2850 uint32_t buf[16];
2851 uint32_t *p = buf;
2852
2853 p += emit_pop (p, x1);
2854 p += emit_cmp (p, x1, register_operand (x0));
2855 /* Branch over the next instruction if x0 == x1. */
2856 p += emit_bcond (p, EQ, 8);
2857 /* The NOP instruction will be patched with an unconditional branch. */
2858 if (offset_p)
2859 *offset_p = (p - buf) * 4;
2860 if (size_p)
2861 *size_p = 4;
2862 p += emit_nop (p);
2863
2864 emit_ops_insns (buf, p - buf);
2865 }
2866
2867 /* Implementation of emit_ops method "emit_lt_goto". */
2868
2869 static void
2870 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2871 {
2872 uint32_t buf[16];
2873 uint32_t *p = buf;
2874
2875 p += emit_pop (p, x1);
2876 p += emit_cmp (p, x1, register_operand (x0));
2877 /* Branch over the next instruction if x0 >= x1. */
2878 p += emit_bcond (p, GE, 8);
2879 /* The NOP instruction will be patched with an unconditional branch. */
2880 if (offset_p)
2881 *offset_p = (p - buf) * 4;
2882 if (size_p)
2883 *size_p = 4;
2884 p += emit_nop (p);
2885
2886 emit_ops_insns (buf, p - buf);
2887 }
2888
2889 /* Implementation of emit_ops method "emit_le_goto". */
2890
2891 static void
2892 aarch64_emit_le_goto (int *offset_p, int *size_p)
2893 {
2894 uint32_t buf[16];
2895 uint32_t *p = buf;
2896
2897 p += emit_pop (p, x1);
2898 p += emit_cmp (p, x1, register_operand (x0));
2899 /* Branch over the next instruction if x0 > x1. */
2900 p += emit_bcond (p, GT, 8);
2901 /* The NOP instruction will be patched with an unconditional branch. */
2902 if (offset_p)
2903 *offset_p = (p - buf) * 4;
2904 if (size_p)
2905 *size_p = 4;
2906 p += emit_nop (p);
2907
2908 emit_ops_insns (buf, p - buf);
2909 }
2910
2911 /* Implementation of emit_ops method "emit_gt_goto". */
2912
2913 static void
2914 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2915 {
2916 uint32_t buf[16];
2917 uint32_t *p = buf;
2918
2919 p += emit_pop (p, x1);
2920 p += emit_cmp (p, x1, register_operand (x0));
2921 /* Branch over the next instruction if x0 <= x1. */
2922 p += emit_bcond (p, LE, 8);
2923 /* The NOP instruction will be patched with an unconditional branch. */
2924 if (offset_p)
2925 *offset_p = (p - buf) * 4;
2926 if (size_p)
2927 *size_p = 4;
2928 p += emit_nop (p);
2929
2930 emit_ops_insns (buf, p - buf);
2931 }
2932
2933 /* Implementation of emit_ops method "emit_ge_got". */
2934
2935 static void
2936 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2937 {
2938 uint32_t buf[16];
2939 uint32_t *p = buf;
2940
2941 p += emit_pop (p, x1);
2942 p += emit_cmp (p, x1, register_operand (x0));
2943 /* Branch over the next instruction if x0 < x1. */
2944 p += emit_bcond (p, LT, 8);
2945 /* The NOP instruction will be patched with an unconditional branch. */
2946 if (offset_p)
2947 *offset_p = (p - buf) * 4;
2948 if (size_p)
2949 *size_p = 4;
2950 p += emit_nop (p);
2951
2952 emit_ops_insns (buf, p - buf);
2953 }
2954
2955 static struct emit_ops aarch64_emit_ops_impl =
2956 {
2957 aarch64_emit_prologue,
2958 aarch64_emit_epilogue,
2959 aarch64_emit_add,
2960 aarch64_emit_sub,
2961 aarch64_emit_mul,
2962 aarch64_emit_lsh,
2963 aarch64_emit_rsh_signed,
2964 aarch64_emit_rsh_unsigned,
2965 aarch64_emit_ext,
2966 aarch64_emit_log_not,
2967 aarch64_emit_bit_and,
2968 aarch64_emit_bit_or,
2969 aarch64_emit_bit_xor,
2970 aarch64_emit_bit_not,
2971 aarch64_emit_equal,
2972 aarch64_emit_less_signed,
2973 aarch64_emit_less_unsigned,
2974 aarch64_emit_ref,
2975 aarch64_emit_if_goto,
2976 aarch64_emit_goto,
2977 aarch64_write_goto_address,
2978 aarch64_emit_const,
2979 aarch64_emit_call,
2980 aarch64_emit_reg,
2981 aarch64_emit_pop,
2982 aarch64_emit_stack_flush,
2983 aarch64_emit_zero_ext,
2984 aarch64_emit_swap,
2985 aarch64_emit_stack_adjust,
2986 aarch64_emit_int_call_1,
2987 aarch64_emit_void_call_2,
2988 aarch64_emit_eq_goto,
2989 aarch64_emit_ne_goto,
2990 aarch64_emit_lt_goto,
2991 aarch64_emit_le_goto,
2992 aarch64_emit_gt_goto,
2993 aarch64_emit_ge_goto,
2994 };
2995
2996 /* Implementation of linux_target_ops method "emit_ops". */
2997
2998 static struct emit_ops *
2999 aarch64_emit_ops (void)
3000 {
3001 return &aarch64_emit_ops_impl;
3002 }
3003
3004 /* Implementation of linux_target_ops method
3005 "get_min_fast_tracepoint_insn_len". */
3006
3007 static int
3008 aarch64_get_min_fast_tracepoint_insn_len (void)
3009 {
3010 return 4;
3011 }
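/* All AArch64 instructions are 4 bytes, and the jump into the pad built
   by aarch64_install_fast_tracepoint_jump_pad is a single B instruction
   (see *jjump_pad_insn_size above), so one instruction slot at the
   tracepoint address is always sufficient.  */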
3012
3013 /* Implementation of linux_target_ops method "supports_range_stepping". */
3014
3015 static int
3016 aarch64_supports_range_stepping (void)
3017 {
3018 return 1;
3019 }
3020
3021 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3022
3023 static const gdb_byte *
3024 aarch64_sw_breakpoint_from_kind (int kind, int *size)
3025 {
3026 if (is_64bit_tdesc ())
3027 {
3028 *size = aarch64_breakpoint_len;
3029 return aarch64_breakpoint;
3030 }
3031 else
3032 return arm_sw_breakpoint_from_kind (kind, size);
3033 }
3034
3035 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3036
3037 static int
3038 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3039 {
3040 if (is_64bit_tdesc ())
3041 return aarch64_breakpoint_len;
3042 else
3043 return arm_breakpoint_kind_from_pc (pcptr);
3044 }
3045
3046 /* Implementation of the linux_target_ops method
3047 "breakpoint_kind_from_current_state". */
3048
3049 static int
3050 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3051 {
3052 if (is_64bit_tdesc ())
3053 return aarch64_breakpoint_len;
3054 else
3055 return arm_breakpoint_kind_from_current_state (pcptr);
3056 }
3057
3058 /* Support for hardware single step. */
3059
3060 static int
3061 aarch64_supports_hardware_single_step (void)
3062 {
3063 return 1;
3064 }
3065
3066 struct linux_target_ops the_low_target =
3067 {
3068 aarch64_regs_info,
3069 NULL, /* cannot_fetch_register */
3070 NULL, /* cannot_store_register */
3071 NULL, /* fetch_register */
3072 aarch64_get_pc,
3073 aarch64_set_pc,
3074 aarch64_breakpoint_kind_from_pc,
3075 aarch64_sw_breakpoint_from_kind,
3076 NULL, /* get_next_pcs */
3077 0, /* decr_pc_after_break */
3078 aarch64_breakpoint_at,
3079 aarch64_supports_z_point_type,
3080 aarch64_insert_point,
3081 aarch64_remove_point,
3082 aarch64_stopped_by_watchpoint,
3083 aarch64_stopped_data_address,
3084 NULL, /* collect_ptrace_register */
3085 NULL, /* supply_ptrace_register */
3086 aarch64_linux_siginfo_fixup,
3087 aarch64_linux_new_process,
3088 aarch64_linux_delete_process,
3089 aarch64_linux_new_thread,
3090 aarch64_linux_delete_thread,
3091 aarch64_linux_new_fork,
3092 aarch64_linux_prepare_to_resume,
3093 NULL, /* process_qsupported */
3094 aarch64_supports_tracepoints,
3095 aarch64_get_thread_area,
3096 aarch64_install_fast_tracepoint_jump_pad,
3097 aarch64_emit_ops,
3098 aarch64_get_min_fast_tracepoint_insn_len,
3099 aarch64_supports_range_stepping,
3100 aarch64_breakpoint_kind_from_current_state,
3101 aarch64_supports_hardware_single_step,
3102 aarch64_get_syscall_trapinfo,
3103 };
3104
3105 /* The linux target ops object. */
3106
3107 linux_process_target *the_linux_target = &the_aarch64_target;
3108
3109 void
3110 initialize_low_arch (void)
3111 {
3112 initialize_low_arch_aarch32 ();
3113
3114 initialize_regsets_info (&aarch64_regsets_info);
3115 initialize_regsets_info (&aarch64_sve_regsets_info);
3116 }