/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset = buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  gdb_byte insn[aarch64_breakpoint_len];

  (*the_target->read_memory) (where, (unsigned char *) &insn,
			      aarch64_breakpoint_len);
  if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
    return 1;

  return 0;
}

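/* Note: AArch64 instructions are stored little-endian regardless of
   data endianness, so the bytes above read back as the 32-bit word
   0xd4200000, which is the encoding of "BRK #0".  This is the word
   the memcmp-based check in aarch64_breakpoint_at compares against.  */
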
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
      {
	if (!extended_protocol && is_64bit_tdesc ())
	  {
	    /* Only enable the Z0 packet in non-multi-arch debugging.  If
	       the extended protocol is used, don't enable the Z0 packet
	       because GDBserver may attach to a 32-bit process.  */
	    return 1;
	  }
	else
	  {
	    /* Disable the Z0 packet so that GDBserver doesn't have to
	       handle different breakpoint instructions (aarch64, arm,
	       thumb etc) in multi-arch debugging.  */
	    return 0;
	  }
      }
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    ret = aarch64_handle_breakpoint (targ_type, addr, len,
				     1 /* is_insert */, state);

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */, state);
  else
    ret = aarch64_handle_breakpoint (targ_type, addr, len,
				     0 /* is_insert */, state);

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch
	  && addr_trap < addr_watch + len)
	return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum
    {
      OPERAND_IMMEDIATE,
      OPERAND_REGISTER,
    } type;
  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

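/* Worked example of the packing above: NZCV is
   (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0
   == 0x4000 | 0x1800 | 0x200 | 0x10 == 0x5a10, the 15-bit field that
   emit_mrs/emit_msr below place at bit 5 of the instruction word.  */
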
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

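/* A note on the ENCODE macro (defined in arch/aarch64-insn.h) used
   throughout these emitters: as used here, ENCODE (val, size, offset)
   places the low SIZE bits of VAL at bit OFFSET of the instruction
   word.  For example, assuming BLR's base opcode is 0xd63f0000,
   emit_blr (p, x1) above produces
   0xd63f0000 | ENCODE (1, 5, 5) == 0xd63f0020, i.e. "BLR x1".  */
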
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return emit_insn (buf, opcode | opc | pre_index | write_back
		    | ENCODE (operand.index >> 3, 7, 15)
		    | ENCODE (rt2.num, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
		    | ENCODE (offset >> 4, 7, 15) | ENCODE (rt2, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return emit_insn (buf, STP_SIMD_VFP | opc | pre_index
		    | ENCODE (offset >> 4, 7, 15)
		    | ENCODE (rt2, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

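/* Illustrative arithmetic for the Q-register pair encodings above: the
   7-bit immediate field holds offset >> 4, so the offset must be a
   multiple of 16 in the -1024 .. 1008 range.  For instance, the jump
   pad below calls emit_stp_q_offset (p, 30, 31, sp, 30 * 16), which
   encodes the field value 480 >> 4 == 30.  */
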
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits << 0).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return emit_insn (buf, opcode | ENCODE (size, 2, 30)
		    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, enum aarch64_opcodes opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return emit_insn (buf, opcode | operand_opcode | size
			| ENCODE (operand.imm, 12, 10)
			| ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return emit_insn (buf, MOV | size | shift
			| ENCODE (operand.imm, 16, 5)
			| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
		    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

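/* For instance, emit_mov_addr (p, x0, 0x0000007fb7ff0148) (a
   hypothetical address) would emit:

     MOV  x0, #0x0148
     MOVK x0, #0xb7ff, lsl #16
     MOVK x0, #0x007f, lsl #32

   and stop there, since bits 48..63 of the address are zero.  */
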
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN is the first source register and OPERAND is the second source
   operand, either a register or an immediate.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and OPERAND are the values to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
		    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
		    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
		    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
		    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
		    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
		    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

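/* For example, emit_ubfx (p, x0, x1, 16, 8) emits UBFX x0, x1, #16, #8,
   i.e. UBFM x0, x1, #16, #23: it copies bits 16..23 of x1 into bits
   0..7 of x0 and zeroes the rest of the destination.  */
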
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes rn to rd if the condition is true, and
   rm + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
		    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
		    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an
   alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

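/* For example, emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE
   (EQ == 0x0 is toggled to NE == 0x1), which writes 1 to x0 if the Z
   flag is set and 0 otherwise.  */
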
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

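/* Usage sketch: callers typically accumulate instructions with the
   emit_* helpers and flush them in one go, e.g.

     uint32_t buf[256];
     uint32_t *p = buf;

     p += emit_mov (p, x0, immediate_operand (12));
     append_insns (&buildaddr, p - buf, buf);

   where buildaddr is a CORE_ADDR cursor into the jump pad; this is
   the pattern install_fast_tracepoint_jump_pad follows below.  */
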
/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor relocates
   an instruction from BASE.INSN_ADDR to NEW_ADDR and saves the
   relocated instruction(s) in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

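/* A concrete example of the offset arithmetic used by these visitors
   (hypothetical addresses): if the original B was at insn_addr
   0x400000 with offset +0x100 (target 0x400100), and the copy lives
   at new_addr 0x500000, then new_offset = 0x400000 - 0x500000 + 0x100
   = -0xfff00, and 0x500000 + (-0xfff00) == 0x400100, so the original
   target is preserved.  */
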
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]
  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int32_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                 |
	  .                                                     .
	  .                                                     . 32 cells
	  .                                                     .
	  | q0                                                  |
	  *---- General purpose registers from 30 down to 0. ---*
	  | x30                                                 |
	  .                                                     .
	  .                                                     . 31 cells
	  .                                                     .
	  | x0                                                  |
	  *------------- Special purpose registers. ------------*
	  | SP                                                  |
	  | PC                                                  |
	  | CPSR (NZCV)                                         | 5 cells
	  | FPSR                                                |
	  | FPCR                                                | <- SP + 16
	  *------------- collecting_t object ------------------*
	  | TPIDR_EL0               | struct tracepoint *       |
     Low  *-----------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

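  /* Size check (not emitted): the frame built below occupies
     (32 + 31 + 5) * 16 bytes of saved state plus one 16-byte cell for
     the collecting_t object, i.e. 1104 bytes in total, which is why
     the original SP is recomputed as sp + #((32 + 31 + 5) * 16) when
     saving it.  */
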
1848 /* Push SIMD&FP registers on the stack:
1849
1850 SUB sp, sp, #(32 * 16)
1851
1852 STP q30, q31, [sp, #(30 * 16)]
1853 ...
1854 STP q0, q1, [sp]
1855
1856 */
1857 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1858 for (i = 30; i >= 0; i -= 2)
1859 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1860
1861  /* Push general purpose registers on the stack. Note that we do not need
1862 to push x31 as it represents the xzr register and not the stack
1863 pointer in a STR instruction.
1864
1865 SUB sp, sp, #(31 * 16)
1866
1867 STR x30, [sp, #(30 * 16)]
1868 ...
1869 STR x0, [sp]
1870
1871 */
1872 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1873 for (i = 30; i >= 0; i -= 1)
1874 p += emit_str (p, aarch64_register (i, 1), sp,
1875 offset_memory_operand (i * 16));
1876
1877 /* Make space for 5 more cells.
1878
1879 SUB sp, sp, #(5 * 16)
1880
1881 */
1882 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1883
1884
1885 /* Save SP:
1886
1887 ADD x4, sp, #((32 + 31 + 5) * 16)
1888 STR x4, [sp, #(4 * 16)]
1889
1890 */
1891 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1892 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1893
1894 /* Save PC (tracepoint address):
1895
1896 MOV x3, #(tpaddr)
1897 ...
1898
1899 STR x3, [sp, #(3 * 16)]
1900
1901 */
1902
1903 p += emit_mov_addr (p, x3, tpaddr);
1904 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1905
1906 /* Save CPSR (NZCV), FPSR and FPCR:
1907
1908 MRS x2, nzcv
1909 MRS x1, fpsr
1910 MRS x0, fpcr
1911
1912 STR x2, [sp, #(2 * 16)]
1913 STR x1, [sp, #(1 * 16)]
1914 STR x0, [sp, #(0 * 16)]
1915
1916 */
1917 p += emit_mrs (p, x2, NZCV);
1918 p += emit_mrs (p, x1, FPSR);
1919 p += emit_mrs (p, x0, FPCR);
1920 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1921 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1922 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1923
1924  /* Push the collecting_t object. It consists of the address of the
1925 tracepoint and an ID for the current thread. We get the latter by
1926 reading the tpidr_el0 system register. It corresponds to the
1927 NT_ARM_TLS register accessible with ptrace.
1928
1929 MOV x0, #(tpoint)
1930 ...
1931
1932 MRS x1, tpidr_el0
1933
1934 STP x0, x1, [sp, #-16]!
1935
1936 */
1937
1938 p += emit_mov_addr (p, x0, tpoint);
1939 p += emit_mrs (p, x1, TPIDR_EL0);
1940 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1941
1942 /* Spin-lock:
1943
1944 The shared memory for the lock is at lockaddr. It will hold zero
1945 if no-one is holding the lock, otherwise it contains the address of
1946 the collecting_t object on the stack of the thread which acquired it.
1947
1948 At this stage, the stack pointer points to this thread's collecting_t
1949 object.
1950
1951 We use the following registers:
1952 - x0: Address of the lock.
1953 - x1: Pointer to collecting_t object.
1954 - x2: Scratch register.
1955
1956 MOV x0, #(lockaddr)
1957 ...
1958 MOV x1, sp
1959
1960     ; Trigger an event local to this core, so that the first WFE
1961     ; below falls through immediately instead of waiting.
1962 SEVL
1963 again:
1964 ; Wait for an event. The event is triggered by either the SEVL
1965 ; or STLR instructions (store release).
1966 WFE
1967
1968     ; Atomically read at lockaddr, marking the memory location as
1969     ; exclusive. This is a load-acquire, so no memory access that
1970     ; follows it can be reordered to take place before it.
1971
1972 LDAXR x2, [x0]
1973
1974 ; Try again if another thread holds the lock.
1975 CBNZ x2, again
1976
1977 ; We can lock it! Write the address of the collecting_t object.
1978 ; This instruction will fail if the memory location is not marked
1979 ; as exclusive anymore. If it succeeds, it will remove the
1980 ; exclusive mark on the memory location. This way, if another
1981 ; thread executes this instruction before us, we will fail and try
1982 ; all over again.
1983 STXR w2, x1, [x0]
1984 CBNZ w2, again
1985
1986 */
1987
1988 p += emit_mov_addr (p, x0, lockaddr);
1989 p += emit_mov (p, x1, register_operand (sp));
1990
1991 p += emit_sevl (p);
1992 p += emit_wfe (p);
1993 p += emit_ldaxr (p, x2, x0);
1994  p += emit_cb (p, 1, x2, -2 * 4);
1995  p += emit_stxr (p, w2, x1, x0);
1996  p += emit_cb (p, 1, w2, -4 * 4);
1997
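  /* For comparison, a sketch of the same acquire protocol in C11
     atomics (names hypothetical, not part of this file):

	void *expected;
	do
	  expected = NULL;
	while (!atomic_compare_exchange_weak_explicit
		 ((_Atomic (void *) *) lockaddr, &expected, me,
		  memory_order_acquire, memory_order_relaxed));

     LDAXR/STXR implement the compare-and-swap; SEVL/WFE merely let the
     core idle between failed attempts instead of busy-spinning.  */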
1998 /* Call collector (struct tracepoint *, unsigned char *):
1999
2000 MOV x0, #(tpoint)
2001 ...
2002
2003 ; Saved registers start after the collecting_t object.
2004 ADD x1, sp, #16
2005
2006 ; We use an intra-procedure-call scratch register.
2007 MOV ip0, #(collector)
2008 ...
2009
2010 ; And call back to C!
2011 BLR ip0
2012
2013 */
2014
2015 p += emit_mov_addr (p, x0, tpoint);
2016 p += emit_add (p, x1, sp, immediate_operand (16));
2017
2018 p += emit_mov_addr (p, ip0, collector);
2019 p += emit_blr (p, ip0);
2020
2021 /* Release the lock.
2022
2023 MOV x0, #(lockaddr)
2024 ...
2025
2026 ; This instruction is a normal store with memory ordering
2027 ; constraints. Thanks to this we do not have to put a data
2028     ; barrier instruction to make sure all data reads and writes are done
2029     ; before this instruction is executed. Furthermore, this instruction
2030 ; will trigger an event, letting other threads know they can grab
2031 ; the lock.
2032 STLR xzr, [x0]
2033
2034 */
2035 p += emit_mov_addr (p, x0, lockaddr);
2036 p += emit_stlr (p, xzr, x0);
2037
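  /* For comparison, in C11 atomics the STLR above is approximately

	atomic_store_explicit ((_Atomic (void *) *) lockaddr, NULL,
			       memory_order_release);

     guaranteeing every write made while holding the lock is visible
     before other threads can observe the lock as free.  */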
2038 /* Free collecting_t object:
2039
2040 ADD sp, sp, #16
2041
2042 */
2043 p += emit_add (p, sp, sp, immediate_operand (16));
2044
2045 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2046 registers from the stack.
2047
2048 LDR x2, [sp, #(2 * 16)]
2049 LDR x1, [sp, #(1 * 16)]
2050 LDR x0, [sp, #(0 * 16)]
2051
2052 MSR NZCV, x2
2053 MSR FPSR, x1
2054 MSR FPCR, x0
2055
2056     ADD sp, sp, #(5 * 16)
2057
2058 */
2059 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2060 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2061 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2062 p += emit_msr (p, NZCV, x2);
2063 p += emit_msr (p, FPSR, x1);
2064 p += emit_msr (p, FPCR, x0);
2065
2066 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2067
2068 /* Pop general purpose registers:
2069
2070 LDR x0, [sp]
2071 ...
2072 LDR x30, [sp, #(30 * 16)]
2073
2074 ADD sp, sp, #(31 * 16)
2075
2076 */
2077 for (i = 0; i <= 30; i += 1)
2078 p += emit_ldr (p, aarch64_register (i, 1), sp,
2079 offset_memory_operand (i * 16));
2080 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2081
2082 /* Pop SIMD&FP registers:
2083
2084 LDP q0, q1, [sp]
2085 ...
2086 LDP q30, q31, [sp, #(30 * 16)]
2087
2088 ADD sp, sp, #(32 * 16)
2089
2090 */
2091 for (i = 0; i <= 30; i += 2)
2092 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2093 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2094
2095 /* Write the code into the inferior memory. */
2096 append_insns (&buildaddr, p - buf, buf);
2097
2098 /* Now emit the relocated instruction. */
2099 *adjusted_insn_addr = buildaddr;
2100  target_read_uint32 (tpaddr, &insn);
2101
2102 insn_data.base.insn_addr = tpaddr;
2103 insn_data.new_addr = buildaddr;
2104 insn_data.insn_ptr = buf;
2105
2106 aarch64_relocate_instruction (insn, &visitor,
2107 (struct aarch64_insn_data *) &insn_data);
2108
2109  /* We may not have been able to relocate the instruction. */
2110  if (insn_data.insn_ptr == buf)
2111 {
2112 sprintf (err,
2113 "E.Could not relocate instruction from %s to %s.",
2114 core_addr_to_string_nz (tpaddr),
2115 core_addr_to_string_nz (buildaddr));
2116 return 1;
2117 }
2118  else
2119    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2120  *adjusted_insn_addr_end = buildaddr;
2121
2122 /* Go back to the start of the buffer. */
2123 p = buf;
2124
2125 /* Emit a branch back from the jump pad. */
2126 offset = (tpaddr + orig_size - buildaddr);
2127 if (!can_encode_int32 (offset, 28))
2128 {
2129 sprintf (err,
2130 "E.Jump back from jump pad too far from tracepoint "
2131 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2132 offset);
2133 return 1;
2134 }
2135
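  /* Note: B encodes a signed 26-bit word offset, i.e. a signed 28-bit
     byte offset, so the jump pad must lie within +/- 128 MiB of the
     tracepoint for this branch (and the one below) to be encodable.  */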
2136 p += emit_b (p, 0, offset);
2137 append_insns (&buildaddr, p - buf, buf);
2138
2139 /* Give the caller a branch instruction into the jump pad. */
2140 offset = (*jump_entry - tpaddr);
2141 if (!can_encode_int32 (offset, 28))
2142 {
2143 sprintf (err,
2144 "E.Jump pad too far from tracepoint "
2145 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2146 offset);
2147 return 1;
2148 }
2149
2150 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2151 *jjump_pad_insn_size = 4;
2152
2153 /* Return the end address of our pad. */
2154 *jump_entry = buildaddr;
2155
2156 return 0;
2157}
2158
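/* For illustration only (struct not part of this file; field names
   assumed): the 16-byte cell the jump pad pushes last with
   "STP x0, x1, [sp, #-16]!" is read back by gdbserver's collector side
   as a two-word object along these lines, with uintptr_t coming from
   <inttypes.h> included at the top of this file.  */

struct collecting_t_sketch
{
  uintptr_t tpoint;	  /* x0: the tracepoint being hit.  */
  uintptr_t thread_area;  /* x1: TPIDR_EL0, identifying the thread.  */
};

/* The saved register block starts 16 bytes above that object, which is
   why the collector is called with x1 = SP + 16.  */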
2159/* Helper function writing LEN instructions from START into
2160 current_insn_ptr. */
2161
2162static void
2163emit_ops_insns (const uint32_t *start, int len)
2164{
2165 CORE_ADDR buildaddr = current_insn_ptr;
2166
2167 if (debug_threads)
2168    debug_printf ("Adding %d instructions at %s\n",
2169 len, paddress (buildaddr));
2170
2171 append_insns (&buildaddr, len, start);
2172 current_insn_ptr = buildaddr;
2173}
2174
2175/* Pop a register from the stack. */
2176
2177static int
2178emit_pop (uint32_t *buf, struct aarch64_register rt)
2179{
2180 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2181}
2182
2183/* Push a register on the stack. */
2184
2185static int
2186emit_push (uint32_t *buf, struct aarch64_register rt)
2187{
2188 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2189}
2190
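/* For reference, these helpers compose like every other emit_* routine,
   each returning the number of 32-bit words written (sketch):

     uint32_t buf[2], *q = buf;
     q += emit_push (q, x0);	-- STR x0, [sp, #-16]!
     q += emit_pop (q, x0);	-- LDR x0, [sp], #16

   The 16-byte cell size keeps SP aligned as AArch64 requires.  */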
2191/* Implementation of emit_ops method "emit_prologue". */
2192
2193static void
2194aarch64_emit_prologue (void)
2195{
2196 uint32_t buf[16];
2197 uint32_t *p = buf;
2198
2199  /* This function emits a prologue for the following function prototype:
2200
2201 enum eval_result_type f (unsigned char *regs,
2202 ULONGEST *value);
2203
2204     The first argument is a buffer of raw registers. The second
2205     argument points to where the result of evaluating the expression
2206     will be stored; that is whatever value is on top of the stack when
2207     the function returns.
2208
2209 The stack set up by the prologue is as such:
2210
2211 High *------------------------------------------------------*
2212 | LR |
2213 | FP | <- FP
2214 | x1 (ULONGEST *value) |
2215 | x0 (unsigned char *regs) |
2216 Low *------------------------------------------------------*
2217
2218 As we are implementing a stack machine, each opcode can expand the
2219 stack so we never know how far we are from the data saved by this
2220     prologue. In order to be able to refer to value and regs later, we save
2221 the current stack pointer in the frame pointer. This way, it is not
2222 clobbered when calling C functions.
2223
2224     Finally, throughout every operation, we use register x0 as the
2225 top of the stack, and x1 as a scratch register. */
2226
2227 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2228 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2229 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2230
2231 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2232
2233
2234 emit_ops_insns (buf, p - buf);
2235}
2236
2237/* Implementation of emit_ops method "emit_epilogue". */
2238
2239static void
2240aarch64_emit_epilogue (void)
2241{
2242 uint32_t buf[16];
2243 uint32_t *p = buf;
2244
2245 /* Store the result of the expression (x0) in *value. */
2246 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2247 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2248 p += emit_str (p, x0, x1, offset_memory_operand (0));
2249
2250 /* Restore the previous state. */
2251 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2252 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2253
2254 /* Return expr_eval_no_error. */
2255 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2256 p += emit_ret (p, lr);
2257
2258 emit_ops_insns (buf, p - buf);
2259}
2260
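/* For reference, gdbserver invokes the code emitted between this
   prologue/epilogue pair through a function pointer of roughly this
   shape (type name hypothetical):

     typedef enum eval_result_type
       (*compiled_expr_fn) (unsigned char *regs, ULONGEST *value);

   so on entry x0 and x1 hold exactly the two values the prologue saved
   below the frame pointer.  */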
2261/* Implementation of emit_ops method "emit_add". */
2262
2263static void
2264aarch64_emit_add (void)
2265{
2266 uint32_t buf[16];
2267 uint32_t *p = buf;
2268
2269 p += emit_pop (p, x1);
2270 p += emit_add (p, x0, x0, register_operand (x1));
2271
2272 emit_ops_insns (buf, p - buf);
2273}
2274
2275/* Implementation of emit_ops method "emit_sub". */
2276
2277static void
2278aarch64_emit_sub (void)
2279{
2280 uint32_t buf[16];
2281 uint32_t *p = buf;
2282
2283 p += emit_pop (p, x1);
2284 p += emit_sub (p, x0, x0, register_operand (x1));
2285
2286 emit_ops_insns (buf, p - buf);
2287}
2288
2289/* Implementation of emit_ops method "emit_mul". */
2290
2291static void
2292aarch64_emit_mul (void)
2293{
2294 uint32_t buf[16];
2295 uint32_t *p = buf;
2296
2297 p += emit_pop (p, x1);
2298 p += emit_mul (p, x0, x1, x0);
2299
2300 emit_ops_insns (buf, p - buf);
2301}
2302
2303/* Implementation of emit_ops method "emit_lsh". */
2304
2305static void
2306aarch64_emit_lsh (void)
2307{
2308 uint32_t buf[16];
2309 uint32_t *p = buf;
2310
2311 p += emit_pop (p, x1);
2312 p += emit_lslv (p, x0, x1, x0);
2313
2314 emit_ops_insns (buf, p - buf);
2315}
2316
2317/* Implementation of emit_ops method "emit_rsh_signed". */
2318
2319static void
2320aarch64_emit_rsh_signed (void)
2321{
2322 uint32_t buf[16];
2323 uint32_t *p = buf;
2324
2325 p += emit_pop (p, x1);
2326 p += emit_asrv (p, x0, x1, x0);
2327
2328 emit_ops_insns (buf, p - buf);
2329}
2330
2331/* Implementation of emit_ops method "emit_rsh_unsigned". */
2332
2333static void
2334aarch64_emit_rsh_unsigned (void)
2335{
2336 uint32_t buf[16];
2337 uint32_t *p = buf;
2338
2339 p += emit_pop (p, x1);
2340 p += emit_lsrv (p, x0, x1, x0);
2341
2342 emit_ops_insns (buf, p - buf);
2343}
2344
2345/* Implementation of emit_ops method "emit_ext". */
2346
2347static void
2348aarch64_emit_ext (int arg)
2349{
2350 uint32_t buf[16];
2351 uint32_t *p = buf;
2352
2353 p += emit_sbfx (p, x0, x0, 0, arg);
2354
2355 emit_ops_insns (buf, p - buf);
2356}
2357
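/* Example: with ARG == 8, SBFX sign-extends from bit 7, so a top of
   stack holding 0xff becomes -1, matching the "ext" agent bytecode.  */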
2358/* Implementation of emit_ops method "emit_log_not". */
2359
2360static void
2361aarch64_emit_log_not (void)
2362{
2363 uint32_t buf[16];
2364 uint32_t *p = buf;
2365
2366 /* If the top of the stack is 0, replace it with 1. Else replace it with
2367 0. */
2368
2369 p += emit_cmp (p, x0, immediate_operand (0));
2370 p += emit_cset (p, x0, EQ);
2371
2372 emit_ops_insns (buf, p - buf);
2373}
2374
2375/* Implementation of emit_ops method "emit_bit_and". */
2376
2377static void
2378aarch64_emit_bit_and (void)
2379{
2380 uint32_t buf[16];
2381 uint32_t *p = buf;
2382
2383 p += emit_pop (p, x1);
2384 p += emit_and (p, x0, x0, x1);
2385
2386 emit_ops_insns (buf, p - buf);
2387}
2388
2389/* Implementation of emit_ops method "emit_bit_or". */
2390
2391static void
2392aarch64_emit_bit_or (void)
2393{
2394 uint32_t buf[16];
2395 uint32_t *p = buf;
2396
2397 p += emit_pop (p, x1);
2398 p += emit_orr (p, x0, x0, x1);
2399
2400 emit_ops_insns (buf, p - buf);
2401}
2402
2403/* Implementation of emit_ops method "emit_bit_xor". */
2404
2405static void
2406aarch64_emit_bit_xor (void)
2407{
2408 uint32_t buf[16];
2409 uint32_t *p = buf;
2410
2411 p += emit_pop (p, x1);
2412 p += emit_eor (p, x0, x0, x1);
2413
2414 emit_ops_insns (buf, p - buf);
2415}
2416
2417/* Implementation of emit_ops method "emit_bit_not". */
2418
2419static void
2420aarch64_emit_bit_not (void)
2421{
2422 uint32_t buf[16];
2423 uint32_t *p = buf;
2424
2425 p += emit_mvn (p, x0, x0);
2426
2427 emit_ops_insns (buf, p - buf);
2428}
2429
2430/* Implementation of emit_ops method "emit_equal". */
2431
2432static void
2433aarch64_emit_equal (void)
2434{
2435 uint32_t buf[16];
2436 uint32_t *p = buf;
2437
2438 p += emit_pop (p, x1);
2439 p += emit_cmp (p, x0, register_operand (x1));
2440 p += emit_cset (p, x0, EQ);
2441
2442 emit_ops_insns (buf, p - buf);
2443}
2444
2445/* Implementation of emit_ops method "emit_less_signed". */
2446
2447static void
2448aarch64_emit_less_signed (void)
2449{
2450 uint32_t buf[16];
2451 uint32_t *p = buf;
2452
2453 p += emit_pop (p, x1);
2454 p += emit_cmp (p, x1, register_operand (x0));
2455 p += emit_cset (p, x0, LT);
2456
2457 emit_ops_insns (buf, p - buf);
2458}
2459
2460/* Implementation of emit_ops method "emit_less_unsigned". */
2461
2462static void
2463aarch64_emit_less_unsigned (void)
2464{
2465 uint32_t buf[16];
2466 uint32_t *p = buf;
2467
2468 p += emit_pop (p, x1);
2469 p += emit_cmp (p, x1, register_operand (x0));
2470 p += emit_cset (p, x0, LO);
2471
2472 emit_ops_insns (buf, p - buf);
2473}
2474
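/* Note the only difference from emit_less_signed is the condition code:
   with x1 = -1 and x0 = 1, LT yields 1 (signed, -1 < 1) while LO yields
   0, because 0xffffffffffffffff is the largest unsigned value.  */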
2475/* Implementation of emit_ops method "emit_ref". */
2476
2477static void
2478aarch64_emit_ref (int size)
2479{
2480 uint32_t buf[16];
2481 uint32_t *p = buf;
2482
2483 switch (size)
2484 {
2485 case 1:
2486 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2487 break;
2488 case 2:
2489 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2490 break;
2491 case 4:
2492 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2493 break;
2494 case 8:
2495 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2496 break;
2497 default:
2498 /* Unknown size, bail on compilation. */
2499 emit_error = 1;
2500 break;
2501 }
2502
2503 emit_ops_insns (buf, p - buf);
2504}
2505
2506/* Implementation of emit_ops method "emit_if_goto". */
2507
2508static void
2509aarch64_emit_if_goto (int *offset_p, int *size_p)
2510{
2511 uint32_t buf[16];
2512 uint32_t *p = buf;
2513
2514 /* The Z flag is set or cleared here. */
2515 p += emit_cmp (p, x0, immediate_operand (0));
2516 /* This instruction must not change the Z flag. */
2517 p += emit_pop (p, x0);
2518 /* Branch over the next instruction if x0 == 0. */
2519 p += emit_bcond (p, EQ, 8);
2520
2521 /* The NOP instruction will be patched with an unconditional branch. */
2522 if (offset_p)
2523 *offset_p = (p - buf) * 4;
2524 if (size_p)
2525 *size_p = 4;
2526 p += emit_nop (p);
2527
2528 emit_ops_insns (buf, p - buf);
2529}
2530
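/* Note on the offset_p/size_p protocol used by the two methods above:
   the bytecode compiler records where the placeholder NOP was emitted
   and, once the branch target is known, calls write_goto_address
   (below) to overwrite the NOP with a real B instruction.  */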
2531/* Implementation of emit_ops method "emit_goto". */
2532
2533static void
2534aarch64_emit_goto (int *offset_p, int *size_p)
2535{
2536 uint32_t buf[16];
2537 uint32_t *p = buf;
2538
2539 /* The NOP instruction will be patched with an unconditional branch. */
2540 if (offset_p)
2541 *offset_p = 0;
2542 if (size_p)
2543 *size_p = 4;
2544 p += emit_nop (p);
2545
2546 emit_ops_insns (buf, p - buf);
2547}
2548
2549/* Implementation of emit_ops method "write_goto_address". */
2550
2551 static void
2552aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2553{
2554 uint32_t insn;
2555
2556 emit_b (&insn, 0, to - from);
2557 append_insns (&from, 1, &insn);
2558}
2559
2560/* Implementation of emit_ops method "emit_const". */
2561
2562static void
2563aarch64_emit_const (LONGEST num)
2564{
2565 uint32_t buf[16];
2566 uint32_t *p = buf;
2567
2568 p += emit_mov_addr (p, x0, num);
2569
2570 emit_ops_insns (buf, p - buf);
2571}
2572
2573/* Implementation of emit_ops method "emit_call". */
2574
2575static void
2576aarch64_emit_call (CORE_ADDR fn)
2577{
2578 uint32_t buf[16];
2579 uint32_t *p = buf;
2580
2581 p += emit_mov_addr (p, ip0, fn);
2582 p += emit_blr (p, ip0);
2583
2584 emit_ops_insns (buf, p - buf);
2585}
2586
2587/* Implementation of emit_ops method "emit_reg". */
2588
2589static void
2590aarch64_emit_reg (int reg)
2591{
2592 uint32_t buf[16];
2593 uint32_t *p = buf;
2594
2595 /* Set x0 to unsigned char *regs. */
2596 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2597 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2598 p += emit_mov (p, x1, immediate_operand (reg));
2599
2600 emit_ops_insns (buf, p - buf);
2601
2602 aarch64_emit_call (get_raw_reg_func_addr ());
2603}
2604
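/* Note: the prologue stored x0 (the regs buffer) at FP - 2*8 and x1
   (the value pointer) at FP - 1*8, which is why this function and the
   epilogue address their saved arguments relative to FP.  The helper
   reached through get_raw_reg_func_addr () then returns raw register
   REG from that buffer in x0.  */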
2605/* Implementation of emit_ops method "emit_pop". */
2606
2607static void
2608aarch64_emit_pop (void)
2609{
2610 uint32_t buf[16];
2611 uint32_t *p = buf;
2612
2613 p += emit_pop (p, x0);
2614
2615 emit_ops_insns (buf, p - buf);
2616}
2617
2618/* Implementation of emit_ops method "emit_stack_flush". */
2619
2620static void
2621aarch64_emit_stack_flush (void)
2622{
2623 uint32_t buf[16];
2624 uint32_t *p = buf;
2625
2626 p += emit_push (p, x0);
2627
2628 emit_ops_insns (buf, p - buf);
2629}
2630
2631/* Implementation of emit_ops method "emit_zero_ext". */
2632
2633static void
2634aarch64_emit_zero_ext (int arg)
2635{
2636 uint32_t buf[16];
2637 uint32_t *p = buf;
2638
2639 p += emit_ubfx (p, x0, x0, 0, arg);
2640
2641 emit_ops_insns (buf, p - buf);
2642}
2643
2644/* Implementation of emit_ops method "emit_swap". */
2645
2646static void
2647aarch64_emit_swap (void)
2648{
2649 uint32_t buf[16];
2650 uint32_t *p = buf;
2651
2652 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2653 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2654 p += emit_mov (p, x0, register_operand (x1));
2655
2656 emit_ops_insns (buf, p - buf);
2657}
2658
2659/* Implementation of emit_ops method "emit_stack_adjust". */
2660
2661static void
2662aarch64_emit_stack_adjust (int n)
2663{
2664  /* Drop N 16-byte cells from the evaluation stack. */
2665 uint32_t buf[16];
2666 uint32_t *p = buf;
2667
2668 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2669
2670 emit_ops_insns (buf, p - buf);
2671}
2672
2673/* Implementation of emit_ops method "emit_int_call_1". */
2674
2675static void
2676aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2677{
2678 uint32_t buf[16];
2679 uint32_t *p = buf;
2680
2681 p += emit_mov (p, x0, immediate_operand (arg1));
2682
2683 emit_ops_insns (buf, p - buf);
2684
2685 aarch64_emit_call (fn);
2686}
2687
2688/* Implementation of emit_ops method "emit_void_call_2". */
2689
2690static void
2691aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2692{
2693 uint32_t buf[16];
2694 uint32_t *p = buf;
2695
2696 /* Push x0 on the stack. */
2697 aarch64_emit_stack_flush ();
2698
2699 /* Setup arguments for the function call:
2700
2701 x0: arg1
2702 x1: top of the stack
2703
2704 MOV x1, x0
2705 MOV x0, #arg1 */
2706
2707 p += emit_mov (p, x1, register_operand (x0));
2708 p += emit_mov (p, x0, immediate_operand (arg1));
2709
2710 emit_ops_insns (buf, p - buf);
2711
2712 aarch64_emit_call (fn);
2713
2714 /* Restore x0. */
2715 aarch64_emit_pop ();
2716}
2717
2718/* Implementation of emit_ops method "emit_eq_goto". */
2719
2720static void
2721aarch64_emit_eq_goto (int *offset_p, int *size_p)
2722{
2723 uint32_t buf[16];
2724 uint32_t *p = buf;
2725
2726 p += emit_pop (p, x1);
2727 p += emit_cmp (p, x1, register_operand (x0));
2728 /* Branch over the next instruction if x0 != x1. */
2729 p += emit_bcond (p, NE, 8);
2730 /* The NOP instruction will be patched with an unconditional branch. */
2731 if (offset_p)
2732 *offset_p = (p - buf) * 4;
2733 if (size_p)
2734 *size_p = 4;
2735 p += emit_nop (p);
2736
2737 emit_ops_insns (buf, p - buf);
2738}
2739
2740/* Implementation of emit_ops method "emit_ne_goto". */
2741
2742static void
2743aarch64_emit_ne_goto (int *offset_p, int *size_p)
2744{
2745 uint32_t buf[16];
2746 uint32_t *p = buf;
2747
2748 p += emit_pop (p, x1);
2749 p += emit_cmp (p, x1, register_operand (x0));
2750 /* Branch over the next instruction if x0 == x1. */
2751 p += emit_bcond (p, EQ, 8);
2752 /* The NOP instruction will be patched with an unconditional branch. */
2753 if (offset_p)
2754 *offset_p = (p - buf) * 4;
2755 if (size_p)
2756 *size_p = 4;
2757 p += emit_nop (p);
2758
2759 emit_ops_insns (buf, p - buf);
2760}
2761
2762/* Implementation of emit_ops method "emit_lt_goto". */
2763
2764static void
2765aarch64_emit_lt_goto (int *offset_p, int *size_p)
2766{
2767 uint32_t buf[16];
2768 uint32_t *p = buf;
2769
2770 p += emit_pop (p, x1);
2771 p += emit_cmp (p, x1, register_operand (x0));
2772  /* Branch over the next instruction if x1 >= x0. */
2773 p += emit_bcond (p, GE, 8);
2774 /* The NOP instruction will be patched with an unconditional branch. */
2775 if (offset_p)
2776 *offset_p = (p - buf) * 4;
2777 if (size_p)
2778 *size_p = 4;
2779 p += emit_nop (p);
2780
2781 emit_ops_insns (buf, p - buf);
2782}
2783
2784/* Implementation of emit_ops method "emit_le_goto". */
2785
2786static void
2787aarch64_emit_le_goto (int *offset_p, int *size_p)
2788{
2789 uint32_t buf[16];
2790 uint32_t *p = buf;
2791
2792 p += emit_pop (p, x1);
2793 p += emit_cmp (p, x1, register_operand (x0));
2794  /* Branch over the next instruction if x1 > x0. */
2795 p += emit_bcond (p, GT, 8);
2796 /* The NOP instruction will be patched with an unconditional branch. */
2797 if (offset_p)
2798 *offset_p = (p - buf) * 4;
2799 if (size_p)
2800 *size_p = 4;
2801 p += emit_nop (p);
2802
2803 emit_ops_insns (buf, p - buf);
2804}
2805
2806/* Implementation of emit_ops method "emit_gt_goto". */
2807
2808static void
2809aarch64_emit_gt_goto (int *offset_p, int *size_p)
2810{
2811 uint32_t buf[16];
2812 uint32_t *p = buf;
2813
2814 p += emit_pop (p, x1);
2815 p += emit_cmp (p, x1, register_operand (x0));
2816  /* Branch over the next instruction if x1 <= x0. */
2817 p += emit_bcond (p, LE, 8);
2818 /* The NOP instruction will be patched with an unconditional branch. */
2819 if (offset_p)
2820 *offset_p = (p - buf) * 4;
2821 if (size_p)
2822 *size_p = 4;
2823 p += emit_nop (p);
2824
2825 emit_ops_insns (buf, p - buf);
2826}
2827
2828/* Implementation of emit_ops method "emit_ge_goto". */
2829
2830static void
2831 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2832{
2833 uint32_t buf[16];
2834 uint32_t *p = buf;
2835
2836 p += emit_pop (p, x1);
2837 p += emit_cmp (p, x1, register_operand (x0));
2838  /* Branch over the next instruction if x1 < x0. */
2839 p += emit_bcond (p, LT, 8);
2840 /* The NOP instruction will be patched with an unconditional branch. */
2841 if (offset_p)
2842 *offset_p = (p - buf) * 4;
2843 if (size_p)
2844 *size_p = 4;
2845 p += emit_nop (p);
2846
2847 emit_ops_insns (buf, p - buf);
2848}
2849
2850static struct emit_ops aarch64_emit_ops_impl =
2851{
2852 aarch64_emit_prologue,
2853 aarch64_emit_epilogue,
2854 aarch64_emit_add,
2855 aarch64_emit_sub,
2856 aarch64_emit_mul,
2857 aarch64_emit_lsh,
2858 aarch64_emit_rsh_signed,
2859 aarch64_emit_rsh_unsigned,
2860 aarch64_emit_ext,
2861 aarch64_emit_log_not,
2862 aarch64_emit_bit_and,
2863 aarch64_emit_bit_or,
2864 aarch64_emit_bit_xor,
2865 aarch64_emit_bit_not,
2866 aarch64_emit_equal,
2867 aarch64_emit_less_signed,
2868 aarch64_emit_less_unsigned,
2869 aarch64_emit_ref,
2870 aarch64_emit_if_goto,
2871 aarch64_emit_goto,
2872 aarch64_write_goto_address,
2873 aarch64_emit_const,
2874 aarch64_emit_call,
2875 aarch64_emit_reg,
2876 aarch64_emit_pop,
2877 aarch64_emit_stack_flush,
2878 aarch64_emit_zero_ext,
2879 aarch64_emit_swap,
2880 aarch64_emit_stack_adjust,
2881 aarch64_emit_int_call_1,
2882 aarch64_emit_void_call_2,
2883 aarch64_emit_eq_goto,
2884 aarch64_emit_ne_goto,
2885 aarch64_emit_lt_goto,
2886 aarch64_emit_le_goto,
2887 aarch64_emit_gt_goto,
2888  aarch64_emit_ge_goto,
2889};
2890
2891/* Implementation of linux_target_ops method "emit_ops". */
2892
2893static struct emit_ops *
2894aarch64_emit_ops (void)
2895{
2896 return &aarch64_emit_ops_impl;
2897}
2898
2899/* Implementation of linux_target_ops method
2900 "get_min_fast_tracepoint_insn_len". */
2901
2902static int
2903aarch64_get_min_fast_tracepoint_insn_len (void)
2904{
2905 return 4;
2906}
2907
2908/* Implementation of linux_target_ops method "supports_range_stepping". */
2909
2910static int
2911aarch64_supports_range_stepping (void)
2912{
2913 return 1;
2914}
2915
2916 struct linux_target_ops the_low_target =
2917 {
2918   aarch64_arch_setup,
2919   aarch64_regs_info,
2920   aarch64_cannot_fetch_register,
2921   aarch64_cannot_store_register,
2922   NULL, /* fetch_register */
2923   aarch64_get_pc,
2924   aarch64_set_pc,
2925   (const unsigned char *) &aarch64_breakpoint,
2926   aarch64_breakpoint_len,
2927   NULL, /* breakpoint_reinsert_addr */
2928   0,    /* decr_pc_after_break */
2929   aarch64_breakpoint_at,
2930   aarch64_supports_z_point_type,
2931   aarch64_insert_point,
2932   aarch64_remove_point,
2933   aarch64_stopped_by_watchpoint,
2934   aarch64_stopped_data_address,
2935   NULL, /* collect_ptrace_register */
2936   NULL, /* supply_ptrace_register */
2937   aarch64_linux_siginfo_fixup,
2938   aarch64_linux_new_process,
2939   aarch64_linux_new_thread,
2940   aarch64_linux_new_fork,
2941   aarch64_linux_prepare_to_resume,
2942   NULL, /* process_qsupported */
2943   aarch64_supports_tracepoints,
2944   aarch64_get_thread_area,
2945   aarch64_install_fast_tracepoint_jump_pad,
2946   aarch64_emit_ops,
2947   aarch64_get_min_fast_tracepoint_insn_len,
2948   aarch64_supports_range_stepping,
2949 };
2950
2951void
2952initialize_low_arch (void)
2953{
2954 init_registers_aarch64 ();
2955
2956 initialize_low_arch_aarch32 ();
2957
2958 initialize_regsets_info (&aarch64_regsets_info);
2959}