[PowerPC] Consolidate linux vector regset sizes
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-aarch64-low.c
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2018 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31
32 #include <signal.h>
33 #include <sys/user.h>
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
36 #include <inttypes.h>
37 #include <endian.h>
38 #include <sys/uio.h>
39
40 #include "gdb_proc_service.h"
41 #include "arch/aarch64.h"
42 #include "linux-aarch64-tdesc.h"
43
44 #ifdef HAVE_SYS_REG_H
45 #include <sys/reg.h>
46 #endif
47
48 /* Per-process arch-specific data we want to keep. */
49
50 struct arch_process_info
51 {
52 /* Hardware breakpoint/watchpoint data.
53 The reason for them to be per-process rather than per-thread is
54 due to the lack of information in the gdbserver environment;
55 gdbserver is not told that whether a requested hardware
56 breakpoint/watchpoint is thread specific or not, so it has to set
57 each hw bp/wp for every thread in the current process. The
58 higher level bp/wp management in gdb will resume a thread if a hw
59 bp/wp trap is not expected for it. Since the hw bp/wp setting is
60 same for each thread, it is reasonable for the data to live here.
61 */
62 struct aarch64_debug_reg_state debug_reg_state;
63 };
64
65 /* Return true if the size of register 0 is 8 byte. */
66
67 static int
68 is_64bit_tdesc (void)
69 {
70 struct regcache *regcache = get_thread_regcache (current_thread, 0);
71
72 return register_size (regcache->tdesc, 0) == 8;
73 }
74
75 /* Implementation of linux_target_ops method "cannot_store_register". */
76
77 static int
78 aarch64_cannot_store_register (int regno)
79 {
80 return regno >= AARCH64_NUM_REGS;
81 }
82
83 /* Implementation of linux_target_ops method "cannot_fetch_register". */
84
85 static int
86 aarch64_cannot_fetch_register (int regno)
87 {
88 return regno >= AARCH64_NUM_REGS;
89 }
90
91 static void
92 aarch64_fill_gregset (struct regcache *regcache, void *buf)
93 {
94 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
95 int i;
96
97 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
98 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
99 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
100 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
101 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
102 }
103
104 static void
105 aarch64_store_gregset (struct regcache *regcache, const void *buf)
106 {
107 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
108 int i;
109
110 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
111 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
112 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
113 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
114 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
115 }
116
117 static void
118 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
119 {
120 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
121 int i;
122
123 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
124 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
125 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
126 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
127 }
128
129 static void
130 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
131 {
132 const struct user_fpsimd_state *regset
133 = (const struct user_fpsimd_state *) buf;
134 int i;
135
136 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
137 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
138 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
139 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
140 }
141
142 /* Enable miscellaneous debugging output. The name is historical - it
143 was originally used to debug LinuxThreads support. */
144 extern int debug_threads;
145
146 /* Implementation of linux_target_ops method "get_pc". */
147
148 static CORE_ADDR
149 aarch64_get_pc (struct regcache *regcache)
150 {
151 if (register_size (regcache->tdesc, 0) == 8)
152 return linux_get_pc_64bit (regcache);
153 else
154 return linux_get_pc_32bit (regcache);
155 }
156
157 /* Implementation of linux_target_ops method "set_pc". */
158
159 static void
160 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
161 {
162 if (register_size (regcache->tdesc, 0) == 8)
163 linux_set_pc_64bit (regcache, pc);
164 else
165 linux_set_pc_32bit (regcache, pc);
166 }
167
168 #define aarch64_breakpoint_len 4
169
170 /* AArch64 BRK software debug mode instruction.
171 This instruction needs to match gdb/aarch64-tdep.c
172 (aarch64_default_breakpoint). */
173 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
174
175 /* Implementation of linux_target_ops method "breakpoint_at". */
176
177 static int
178 aarch64_breakpoint_at (CORE_ADDR where)
179 {
180 if (is_64bit_tdesc ())
181 {
182 gdb_byte insn[aarch64_breakpoint_len];
183
184 (*the_target->read_memory) (where, (unsigned char *) &insn,
185 aarch64_breakpoint_len);
186 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
187 return 1;
188
189 return 0;
190 }
191 else
192 return arm_breakpoint_at (where);
193 }
194
195 static void
196 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
197 {
198 int i;
199
200 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
201 {
202 state->dr_addr_bp[i] = 0;
203 state->dr_ctrl_bp[i] = 0;
204 state->dr_ref_count_bp[i] = 0;
205 }
206
207 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
208 {
209 state->dr_addr_wp[i] = 0;
210 state->dr_ctrl_wp[i] = 0;
211 state->dr_ref_count_wp[i] = 0;
212 }
213 }
214
215 /* Return the pointer to the debug register state structure in the
216 current process' arch-specific data area. */
217
218 struct aarch64_debug_reg_state *
219 aarch64_get_debug_reg_state (pid_t pid)
220 {
221 struct process_info *proc = find_process_pid (pid);
222
223 return &proc->priv->arch_private->debug_reg_state;
224 }
225
226 /* Implementation of linux_target_ops method "supports_z_point_type". */
227
228 static int
229 aarch64_supports_z_point_type (char z_type)
230 {
231 switch (z_type)
232 {
233 case Z_PACKET_SW_BP:
234 case Z_PACKET_HW_BP:
235 case Z_PACKET_WRITE_WP:
236 case Z_PACKET_READ_WP:
237 case Z_PACKET_ACCESS_WP:
238 return 1;
239 default:
240 return 0;
241 }
242 }
243
244 /* Implementation of linux_target_ops method "insert_point".
245
246 It actually only records the info of the to-be-inserted bp/wp;
247 the actual insertion will happen when threads are resumed. */
248
249 static int
250 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
251 int len, struct raw_breakpoint *bp)
252 {
253 int ret;
254 enum target_hw_bp_type targ_type;
255 struct aarch64_debug_reg_state *state
256 = aarch64_get_debug_reg_state (pid_of (current_thread));
257
258 if (show_debug_regs)
259 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
260 (unsigned long) addr, len);
261
262 /* Determine the type from the raw breakpoint type. */
263 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
264
265 if (targ_type != hw_execute)
266 {
267 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
268 ret = aarch64_handle_watchpoint (targ_type, addr, len,
269 1 /* is_insert */, state);
270 else
271 ret = -1;
272 }
273 else
274 {
275 if (len == 3)
276 {
277 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
278 instruction. Set it to 2 to correctly encode length bit
279 mask in hardware/watchpoint control register. */
280 len = 2;
281 }
282 ret = aarch64_handle_breakpoint (targ_type, addr, len,
283 1 /* is_insert */, state);
284 }
285
286 if (show_debug_regs)
287 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
288 targ_type);
289
290 return ret;
291 }
292
293 /* Implementation of linux_target_ops method "remove_point".
294
295 It actually only records the info of the to-be-removed bp/wp,
296 the actual removal will be done when threads are resumed. */
297
298 static int
299 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
300 int len, struct raw_breakpoint *bp)
301 {
302 int ret;
303 enum target_hw_bp_type targ_type;
304 struct aarch64_debug_reg_state *state
305 = aarch64_get_debug_reg_state (pid_of (current_thread));
306
307 if (show_debug_regs)
308 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
309 (unsigned long) addr, len);
310
311 /* Determine the type from the raw breakpoint type. */
312 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
313
314 /* Set up state pointers. */
315 if (targ_type != hw_execute)
316 ret =
317 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
318 state);
319 else
320 {
321 if (len == 3)
322 {
323 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
324 instruction. Set it to 2 to correctly encode length bit
325 mask in hardware/watchpoint control register. */
326 len = 2;
327 }
328 ret = aarch64_handle_breakpoint (targ_type, addr, len,
329 0 /* is_insert */, state);
330 }
331
332 if (show_debug_regs)
333 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
334 targ_type);
335
336 return ret;
337 }
338
339 /* Implementation of linux_target_ops method "stopped_data_address". */
340
341 static CORE_ADDR
342 aarch64_stopped_data_address (void)
343 {
344 siginfo_t siginfo;
345 int pid, i;
346 struct aarch64_debug_reg_state *state;
347
348 pid = lwpid_of (current_thread);
349
350 /* Get the siginfo. */
351 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
352 return (CORE_ADDR) 0;
353
354 /* Need to be a hardware breakpoint/watchpoint trap. */
355 if (siginfo.si_signo != SIGTRAP
356 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
357 return (CORE_ADDR) 0;
358
359 /* Check if the address matches any watched address. */
360 state = aarch64_get_debug_reg_state (pid_of (current_thread));
361 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
362 {
363 const unsigned int offset
364 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
365 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
366 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
367 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
368 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
369 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
370
371 if (state->dr_ref_count_wp[i]
372 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
373 && addr_trap >= addr_watch_aligned
374 && addr_trap < addr_watch + len)
375 {
376 /* ADDR_TRAP reports the first address of the memory range
377 accessed by the CPU, regardless of what was the memory
378 range watched. Thus, a large CPU access that straddles
379 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
380 ADDR_TRAP that is lower than the
381 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
382
383 addr: | 4 | 5 | 6 | 7 | 8 |
384 |---- range watched ----|
385 |----------- range accessed ------------|
386
387 In this case, ADDR_TRAP will be 4.
388
389 To match a watchpoint known to GDB core, we must never
390 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
391 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
392 positive on kernels older than 4.10. See PR
393 external/20207. */
394 return addr_orig;
395 }
396 }
397
398 return (CORE_ADDR) 0;
399 }
400
401 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
402
403 static int
404 aarch64_stopped_by_watchpoint (void)
405 {
406 if (aarch64_stopped_data_address () != 0)
407 return 1;
408 else
409 return 0;
410 }
411
412 /* Fetch the thread-local storage pointer for libthread_db. */
413
414 ps_err_e
415 ps_get_thread_area (struct ps_prochandle *ph,
416 lwpid_t lwpid, int idx, void **base)
417 {
418 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
419 is_64bit_tdesc ());
420 }
421
422 /* Implementation of linux_target_ops method "siginfo_fixup". */
423
424 static int
425 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
426 {
427 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
428 if (!is_64bit_tdesc ())
429 {
430 if (direction == 0)
431 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
432 native);
433 else
434 aarch64_siginfo_from_compat_siginfo (native,
435 (struct compat_siginfo *) inf);
436
437 return 1;
438 }
439
440 return 0;
441 }
442
443 /* Implementation of linux_target_ops method "new_process". */
444
445 static struct arch_process_info *
446 aarch64_linux_new_process (void)
447 {
448 struct arch_process_info *info = XCNEW (struct arch_process_info);
449
450 aarch64_init_debug_reg_state (&info->debug_reg_state);
451
452 return info;
453 }
454
455 /* Implementation of linux_target_ops method "delete_process". */
456
457 static void
458 aarch64_linux_delete_process (struct arch_process_info *info)
459 {
460 xfree (info);
461 }
462
463 /* Implementation of linux_target_ops method "linux_new_fork". */
464
465 static void
466 aarch64_linux_new_fork (struct process_info *parent,
467 struct process_info *child)
468 {
469 /* These are allocated by linux_add_process. */
470 gdb_assert (parent->priv != NULL
471 && parent->priv->arch_private != NULL);
472 gdb_assert (child->priv != NULL
473 && child->priv->arch_private != NULL);
474
475 /* Linux kernel before 2.6.33 commit
476 72f674d203cd230426437cdcf7dd6f681dad8b0d
477 will inherit hardware debug registers from parent
478 on fork/vfork/clone. Newer Linux kernels create such tasks with
479 zeroed debug registers.
480
481 GDB core assumes the child inherits the watchpoints/hw
482 breakpoints of the parent, and will remove them all from the
483 forked off process. Copy the debug registers mirrors into the
484 new process so that all breakpoints and watchpoints can be
485 removed together. The debug registers mirror will become zeroed
486 in the end before detaching the forked off process, thus making
487 this compatible with older Linux kernels too. */
488
489 *child->priv->arch_private = *parent->priv->arch_private;
490 }
491
492 /* Implementation of linux_target_ops method "arch_setup". */
493
494 static void
495 aarch64_arch_setup (void)
496 {
497 unsigned int machine;
498 int is_elf64;
499 int tid;
500
501 tid = lwpid_of (current_thread);
502
503 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
504
505 if (is_elf64)
506 current_process ()->tdesc = aarch64_linux_read_description ();
507 else
508 current_process ()->tdesc = tdesc_arm_with_neon;
509
510 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
511 }
512
513 static struct regset_info aarch64_regsets[] =
514 {
515 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
516 sizeof (struct user_pt_regs), GENERAL_REGS,
517 aarch64_fill_gregset, aarch64_store_gregset },
518 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
519 sizeof (struct user_fpsimd_state), FP_REGS,
520 aarch64_fill_fpregset, aarch64_store_fpregset
521 },
522 NULL_REGSET
523 };
524
525 static struct regsets_info aarch64_regsets_info =
526 {
527 aarch64_regsets, /* regsets */
528 0, /* num_regsets */
529 NULL, /* disabled_regsets */
530 };
531
532 static struct regs_info regs_info_aarch64 =
533 {
534 NULL, /* regset_bitmap */
535 NULL, /* usrregs */
536 &aarch64_regsets_info,
537 };
538
539 /* Implementation of linux_target_ops method "regs_info". */
540
541 static const struct regs_info *
542 aarch64_regs_info (void)
543 {
544 if (is_64bit_tdesc ())
545 return &regs_info_aarch64;
546 else
547 return &regs_info_aarch32;
548 }
549
550 /* Implementation of linux_target_ops method "supports_tracepoints". */
551
552 static int
553 aarch64_supports_tracepoints (void)
554 {
555 if (current_thread == NULL)
556 return 1;
557 else
558 {
559 /* We don't support tracepoints on aarch32 now. */
560 return is_64bit_tdesc ();
561 }
562 }
563
564 /* Implementation of linux_target_ops method "get_thread_area". */
565
566 static int
567 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
568 {
569 struct iovec iovec;
570 uint64_t reg;
571
572 iovec.iov_base = &reg;
573 iovec.iov_len = sizeof (reg);
574
575 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
576 return -1;
577
578 *addrp = reg;
579
580 return 0;
581 }
582
583 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
584
585 static void
586 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
587 {
588 int use_64bit = register_size (regcache->tdesc, 0) == 8;
589
590 if (use_64bit)
591 {
592 long l_sysno;
593
594 collect_register_by_name (regcache, "x8", &l_sysno);
595 *sysno = (int) l_sysno;
596 }
597 else
598 collect_register_by_name (regcache, "r7", sysno);
599 }
600
601 /* List of condition codes that we need. */
602
603 enum aarch64_condition_codes
604 {
605 EQ = 0x0,
606 NE = 0x1,
607 LO = 0x3,
608 GE = 0xa,
609 LT = 0xb,
610 GT = 0xc,
611 LE = 0xd,
612 };
613
614 enum aarch64_operand_type
615 {
616 OPERAND_IMMEDIATE,
617 OPERAND_REGISTER,
618 };
619
620 /* Representation of an operand. At this time, it only supports register
621 and immediate types. */
622
623 struct aarch64_operand
624 {
625 /* Type of the operand. */
626 enum aarch64_operand_type type;
627
628 /* Value of the operand according to the type. */
629 union
630 {
631 uint32_t imm;
632 struct aarch64_register reg;
633 };
634 };
635
636 /* List of registers that we are currently using, we can add more here as
637 we need to use them. */
638
639 /* General purpose scratch registers (64 bit). */
640 static const struct aarch64_register x0 = { 0, 1 };
641 static const struct aarch64_register x1 = { 1, 1 };
642 static const struct aarch64_register x2 = { 2, 1 };
643 static const struct aarch64_register x3 = { 3, 1 };
644 static const struct aarch64_register x4 = { 4, 1 };
645
646 /* General purpose scratch registers (32 bit). */
647 static const struct aarch64_register w0 = { 0, 0 };
648 static const struct aarch64_register w2 = { 2, 0 };
649
650 /* Intra-procedure scratch registers. */
651 static const struct aarch64_register ip0 = { 16, 1 };
652
653 /* Special purpose registers. */
654 static const struct aarch64_register fp = { 29, 1 };
655 static const struct aarch64_register lr = { 30, 1 };
656 static const struct aarch64_register sp = { 31, 1 };
657 static const struct aarch64_register xzr = { 31, 1 };
658
659 /* Dynamically allocate a new register. If we know the register
660 statically, we should make it a global as above instead of using this
661 helper function. */
662
663 static struct aarch64_register
664 aarch64_register (unsigned num, int is64)
665 {
666 return (struct aarch64_register) { num, is64 };
667 }
668
669 /* Helper function to create a register operand, for instructions with
670 different types of operands.
671
672 For example:
673 p += emit_mov (p, x0, register_operand (x1)); */
674
675 static struct aarch64_operand
676 register_operand (struct aarch64_register reg)
677 {
678 struct aarch64_operand operand;
679
680 operand.type = OPERAND_REGISTER;
681 operand.reg = reg;
682
683 return operand;
684 }
685
686 /* Helper function to create an immediate operand, for instructions with
687 different types of operands.
688
689 For example:
690 p += emit_mov (p, x0, immediate_operand (12)); */
691
692 static struct aarch64_operand
693 immediate_operand (uint32_t imm)
694 {
695 struct aarch64_operand operand;
696
697 operand.type = OPERAND_IMMEDIATE;
698 operand.imm = imm;
699
700 return operand;
701 }
702
703 /* Helper function to create an offset memory operand.
704
705 For example:
706 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
707
708 static struct aarch64_memory_operand
709 offset_memory_operand (int32_t offset)
710 {
711 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
712 }
713
714 /* Helper function to create a pre-index memory operand.
715
716 For example:
717 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
718
719 static struct aarch64_memory_operand
720 preindex_memory_operand (int32_t index)
721 {
722 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
723 }
724
725 /* Helper function to create a post-index memory operand.
726
727 For example:
728 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
729
730 static struct aarch64_memory_operand
731 postindex_memory_operand (int32_t index)
732 {
733 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
734 }
735
736 /* System control registers. These special registers can be written and
737 read with the MRS and MSR instructions.
738
739 - NZCV: Condition flags. GDB refers to this register under the CPSR
740 name.
741 - FPSR: Floating-point status register.
742 - FPCR: Floating-point control registers.
743 - TPIDR_EL0: Software thread ID register. */
744
745 enum aarch64_system_control_registers
746 {
747 /* op0 op1 crn crm op2 */
748 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
749 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
750 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
751 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
752 };
753
754 /* Write a BLR instruction into *BUF.
755
756 BLR rn
757
758 RN is the register to branch to. */
759
760 static int
761 emit_blr (uint32_t *buf, struct aarch64_register rn)
762 {
763 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
764 }
765
766 /* Write a RET instruction into *BUF.
767
768 RET xn
769
770 RN is the register to branch to. */
771
772 static int
773 emit_ret (uint32_t *buf, struct aarch64_register rn)
774 {
775 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
776 }
777
778 static int
779 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
780 struct aarch64_register rt,
781 struct aarch64_register rt2,
782 struct aarch64_register rn,
783 struct aarch64_memory_operand operand)
784 {
785 uint32_t opc;
786 uint32_t pre_index;
787 uint32_t write_back;
788
789 if (rt.is64)
790 opc = ENCODE (2, 2, 30);
791 else
792 opc = ENCODE (0, 2, 30);
793
794 switch (operand.type)
795 {
796 case MEMORY_OPERAND_OFFSET:
797 {
798 pre_index = ENCODE (1, 1, 24);
799 write_back = ENCODE (0, 1, 23);
800 break;
801 }
802 case MEMORY_OPERAND_POSTINDEX:
803 {
804 pre_index = ENCODE (0, 1, 24);
805 write_back = ENCODE (1, 1, 23);
806 break;
807 }
808 case MEMORY_OPERAND_PREINDEX:
809 {
810 pre_index = ENCODE (1, 1, 24);
811 write_back = ENCODE (1, 1, 23);
812 break;
813 }
814 default:
815 return 0;
816 }
817
818 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
819 | ENCODE (operand.index >> 3, 7, 15)
820 | ENCODE (rt2.num, 5, 10)
821 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
822 }
823
824 /* Write a STP instruction into *BUF.
825
826 STP rt, rt2, [rn, #offset]
827 STP rt, rt2, [rn, #index]!
828 STP rt, rt2, [rn], #index
829
830 RT and RT2 are the registers to store.
831 RN is the base address register.
832 OFFSET is the immediate to add to the base address. It is limited to a
833 -512 .. 504 range (7 bits << 3). */
834
835 static int
836 emit_stp (uint32_t *buf, struct aarch64_register rt,
837 struct aarch64_register rt2, struct aarch64_register rn,
838 struct aarch64_memory_operand operand)
839 {
840 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
841 }
842
843 /* Write a LDP instruction into *BUF.
844
845 LDP rt, rt2, [rn, #offset]
846 LDP rt, rt2, [rn, #index]!
847 LDP rt, rt2, [rn], #index
848
849 RT and RT2 are the registers to store.
850 RN is the base address register.
851 OFFSET is the immediate to add to the base address. It is limited to a
852 -512 .. 504 range (7 bits << 3). */
853
854 static int
855 emit_ldp (uint32_t *buf, struct aarch64_register rt,
856 struct aarch64_register rt2, struct aarch64_register rn,
857 struct aarch64_memory_operand operand)
858 {
859 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
860 }
861
862 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
863
864 LDP qt, qt2, [rn, #offset]
865
866 RT and RT2 are the Q registers to store.
867 RN is the base address register.
868 OFFSET is the immediate to add to the base address. It is limited to
869 -1024 .. 1008 range (7 bits << 4). */
870
871 static int
872 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
873 struct aarch64_register rn, int32_t offset)
874 {
875 uint32_t opc = ENCODE (2, 2, 30);
876 uint32_t pre_index = ENCODE (1, 1, 24);
877
878 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
879 | ENCODE (offset >> 4, 7, 15)
880 | ENCODE (rt2, 5, 10)
881 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
882 }
883
884 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
885
886 STP qt, qt2, [rn, #offset]
887
888 RT and RT2 are the Q registers to store.
889 RN is the base address register.
890 OFFSET is the immediate to add to the base address. It is limited to
891 -1024 .. 1008 range (7 bits << 4). */
892
893 static int
894 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
895 struct aarch64_register rn, int32_t offset)
896 {
897 uint32_t opc = ENCODE (2, 2, 30);
898 uint32_t pre_index = ENCODE (1, 1, 24);
899
900 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
901 | ENCODE (offset >> 4, 7, 15)
902 | ENCODE (rt2, 5, 10)
903 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
904 }
905
906 /* Write a LDRH instruction into *BUF.
907
908 LDRH wt, [xn, #offset]
909 LDRH wt, [xn, #index]!
910 LDRH wt, [xn], #index
911
912 RT is the register to store.
913 RN is the base address register.
914 OFFSET is the immediate to add to the base address. It is limited to
915 0 .. 32760 range (12 bits << 3). */
916
917 static int
918 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
919 struct aarch64_register rn,
920 struct aarch64_memory_operand operand)
921 {
922 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
923 }
924
925 /* Write a LDRB instruction into *BUF.
926
927 LDRB wt, [xn, #offset]
928 LDRB wt, [xn, #index]!
929 LDRB wt, [xn], #index
930
931 RT is the register to store.
932 RN is the base address register.
933 OFFSET is the immediate to add to the base address. It is limited to
934 0 .. 32760 range (12 bits << 3). */
935
936 static int
937 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
938 struct aarch64_register rn,
939 struct aarch64_memory_operand operand)
940 {
941 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
942 }
943
944
945
946 /* Write a STR instruction into *BUF.
947
948 STR rt, [rn, #offset]
949 STR rt, [rn, #index]!
950 STR rt, [rn], #index
951
952 RT is the register to store.
953 RN is the base address register.
954 OFFSET is the immediate to add to the base address. It is limited to
955 0 .. 32760 range (12 bits << 3). */
956
957 static int
958 emit_str (uint32_t *buf, struct aarch64_register rt,
959 struct aarch64_register rn,
960 struct aarch64_memory_operand operand)
961 {
962 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
963 }
964
965 /* Helper function emitting an exclusive load or store instruction. */
966
967 static int
968 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
969 enum aarch64_opcodes opcode,
970 struct aarch64_register rs,
971 struct aarch64_register rt,
972 struct aarch64_register rt2,
973 struct aarch64_register rn)
974 {
975 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
976 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
977 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
978 }
979
980 /* Write a LAXR instruction into *BUF.
981
982 LDAXR rt, [xn]
983
984 RT is the destination register.
985 RN is the base address register. */
986
987 static int
988 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
989 struct aarch64_register rn)
990 {
991 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
992 xzr, rn);
993 }
994
995 /* Write a STXR instruction into *BUF.
996
997 STXR ws, rt, [xn]
998
999 RS is the result register, it indicates if the store succeeded or not.
1000 RT is the destination register.
1001 RN is the base address register. */
1002
1003 static int
1004 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1005 struct aarch64_register rt, struct aarch64_register rn)
1006 {
1007 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1008 xzr, rn);
1009 }
1010
1011 /* Write a STLR instruction into *BUF.
1012
1013 STLR rt, [xn]
1014
1015 RT is the register to store.
1016 RN is the base address register. */
1017
1018 static int
1019 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1020 struct aarch64_register rn)
1021 {
1022 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1023 xzr, rn);
1024 }
1025
1026 /* Helper function for data processing instructions with register sources. */
1027
1028 static int
1029 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1030 struct aarch64_register rd,
1031 struct aarch64_register rn,
1032 struct aarch64_register rm)
1033 {
1034 uint32_t size = ENCODE (rd.is64, 1, 31);
1035
1036 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1037 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1038 }
1039
1040 /* Helper function for data processing instructions taking either a register
1041 or an immediate. */
1042
1043 static int
1044 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1045 struct aarch64_register rd,
1046 struct aarch64_register rn,
1047 struct aarch64_operand operand)
1048 {
1049 uint32_t size = ENCODE (rd.is64, 1, 31);
1050 /* The opcode is different for register and immediate source operands. */
1051 uint32_t operand_opcode;
1052
1053 if (operand.type == OPERAND_IMMEDIATE)
1054 {
1055 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1056 operand_opcode = ENCODE (8, 4, 25);
1057
1058 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1059 | ENCODE (operand.imm, 12, 10)
1060 | ENCODE (rn.num, 5, 5)
1061 | ENCODE (rd.num, 5, 0));
1062 }
1063 else
1064 {
1065 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1066 operand_opcode = ENCODE (5, 4, 25);
1067
1068 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1069 rn, operand.reg);
1070 }
1071 }
1072
1073 /* Write an ADD instruction into *BUF.
1074
1075 ADD rd, rn, #imm
1076 ADD rd, rn, rm
1077
1078 This function handles both an immediate and register add.
1079
1080 RD is the destination register.
1081 RN is the input register.
1082 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1083 OPERAND_REGISTER. */
1084
1085 static int
1086 emit_add (uint32_t *buf, struct aarch64_register rd,
1087 struct aarch64_register rn, struct aarch64_operand operand)
1088 {
1089 return emit_data_processing (buf, ADD, rd, rn, operand);
1090 }
1091
1092 /* Write a SUB instruction into *BUF.
1093
1094 SUB rd, rn, #imm
1095 SUB rd, rn, rm
1096
1097 This function handles both an immediate and register sub.
1098
1099 RD is the destination register.
1100 RN is the input register.
1101 IMM is the immediate to substract to RN. */
1102
1103 static int
1104 emit_sub (uint32_t *buf, struct aarch64_register rd,
1105 struct aarch64_register rn, struct aarch64_operand operand)
1106 {
1107 return emit_data_processing (buf, SUB, rd, rn, operand);
1108 }
1109
1110 /* Write a MOV instruction into *BUF.
1111
1112 MOV rd, #imm
1113 MOV rd, rm
1114
1115 This function handles both a wide immediate move and a register move,
1116 with the condition that the source register is not xzr. xzr and the
1117 stack pointer share the same encoding and this function only supports
1118 the stack pointer.
1119
1120 RD is the destination register.
1121 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1122 OPERAND_REGISTER. */
1123
1124 static int
1125 emit_mov (uint32_t *buf, struct aarch64_register rd,
1126 struct aarch64_operand operand)
1127 {
1128 if (operand.type == OPERAND_IMMEDIATE)
1129 {
1130 uint32_t size = ENCODE (rd.is64, 1, 31);
1131 /* Do not shift the immediate. */
1132 uint32_t shift = ENCODE (0, 2, 21);
1133
1134 return aarch64_emit_insn (buf, MOV | size | shift
1135 | ENCODE (operand.imm, 16, 5)
1136 | ENCODE (rd.num, 5, 0));
1137 }
1138 else
1139 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1140 }
1141
1142 /* Write a MOVK instruction into *BUF.
1143
1144 MOVK rd, #imm, lsl #shift
1145
1146 RD is the destination register.
1147 IMM is the immediate.
1148 SHIFT is the logical shift left to apply to IMM. */
1149
1150 static int
1151 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1152 unsigned shift)
1153 {
1154 uint32_t size = ENCODE (rd.is64, 1, 31);
1155
1156 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1157 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1158 }
1159
1160 /* Write instructions into *BUF in order to move ADDR into a register.
1161 ADDR can be a 64-bit value.
1162
1163 This function will emit a series of MOV and MOVK instructions, such as:
1164
1165 MOV xd, #(addr)
1166 MOVK xd, #(addr >> 16), lsl #16
1167 MOVK xd, #(addr >> 32), lsl #32
1168 MOVK xd, #(addr >> 48), lsl #48 */
1169
1170 static int
1171 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1172 {
1173 uint32_t *p = buf;
1174
1175 /* The MOV (wide immediate) instruction clears to top bits of the
1176 register. */
1177 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1178
1179 if ((addr >> 16) != 0)
1180 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1181 else
1182 return p - buf;
1183
1184 if ((addr >> 32) != 0)
1185 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1186 else
1187 return p - buf;
1188
1189 if ((addr >> 48) != 0)
1190 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1191
1192 return p - buf;
1193 }
1194
1195 /* Write a SUBS instruction into *BUF.
1196
1197 SUBS rd, rn, rm
1198
1199 This instruction update the condition flags.
1200
1201 RD is the destination register.
1202 RN and RM are the source registers. */
1203
1204 static int
1205 emit_subs (uint32_t *buf, struct aarch64_register rd,
1206 struct aarch64_register rn, struct aarch64_operand operand)
1207 {
1208 return emit_data_processing (buf, SUBS, rd, rn, operand);
1209 }
1210
1211 /* Write a CMP instruction into *BUF.
1212
1213 CMP rn, rm
1214
1215 This instruction is an alias of SUBS xzr, rn, rm.
1216
1217 RN and RM are the registers to compare. */
1218
1219 static int
1220 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1221 struct aarch64_operand operand)
1222 {
1223 return emit_subs (buf, xzr, rn, operand);
1224 }
1225
1226 /* Write a AND instruction into *BUF.
1227
1228 AND rd, rn, rm
1229
1230 RD is the destination register.
1231 RN and RM are the source registers. */
1232
1233 static int
1234 emit_and (uint32_t *buf, struct aarch64_register rd,
1235 struct aarch64_register rn, struct aarch64_register rm)
1236 {
1237 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1238 }
1239
1240 /* Write a ORR instruction into *BUF.
1241
1242 ORR rd, rn, rm
1243
1244 RD is the destination register.
1245 RN and RM are the source registers. */
1246
1247 static int
1248 emit_orr (uint32_t *buf, struct aarch64_register rd,
1249 struct aarch64_register rn, struct aarch64_register rm)
1250 {
1251 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1252 }
1253
1254 /* Write a ORN instruction into *BUF.
1255
1256 ORN rd, rn, rm
1257
1258 RD is the destination register.
1259 RN and RM are the source registers. */
1260
1261 static int
1262 emit_orn (uint32_t *buf, struct aarch64_register rd,
1263 struct aarch64_register rn, struct aarch64_register rm)
1264 {
1265 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1266 }
1267
1268 /* Write a EOR instruction into *BUF.
1269
1270 EOR rd, rn, rm
1271
1272 RD is the destination register.
1273 RN and RM are the source registers. */
1274
1275 static int
1276 emit_eor (uint32_t *buf, struct aarch64_register rd,
1277 struct aarch64_register rn, struct aarch64_register rm)
1278 {
1279 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1280 }
1281
1282 /* Write a MVN instruction into *BUF.
1283
1284 MVN rd, rm
1285
1286 This is an alias for ORN rd, xzr, rm.
1287
1288 RD is the destination register.
1289 RM is the source register. */
1290
1291 static int
1292 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1293 struct aarch64_register rm)
1294 {
1295 return emit_orn (buf, rd, xzr, rm);
1296 }
1297
1298 /* Write a LSLV instruction into *BUF.
1299
1300 LSLV rd, rn, rm
1301
1302 RD is the destination register.
1303 RN and RM are the source registers. */
1304
1305 static int
1306 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1307 struct aarch64_register rn, struct aarch64_register rm)
1308 {
1309 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1310 }
1311
1312 /* Write a LSRV instruction into *BUF.
1313
1314 LSRV rd, rn, rm
1315
1316 RD is the destination register.
1317 RN and RM are the source registers. */
1318
1319 static int
1320 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1321 struct aarch64_register rn, struct aarch64_register rm)
1322 {
1323 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1324 }
1325
1326 /* Write a ASRV instruction into *BUF.
1327
1328 ASRV rd, rn, rm
1329
1330 RD is the destination register.
1331 RN and RM are the source registers. */
1332
1333 static int
1334 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1335 struct aarch64_register rn, struct aarch64_register rm)
1336 {
1337 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1338 }
1339
1340 /* Write a MUL instruction into *BUF.
1341
1342 MUL rd, rn, rm
1343
1344 RD is the destination register.
1345 RN and RM are the source registers. */
1346
1347 static int
1348 emit_mul (uint32_t *buf, struct aarch64_register rd,
1349 struct aarch64_register rn, struct aarch64_register rm)
1350 {
1351 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1352 }
1353
1354 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1355
1356 MRS xt, system_reg
1357
1358 RT is the destination register.
1359 SYSTEM_REG is special purpose register to read. */
1360
1361 static int
1362 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1363 enum aarch64_system_control_registers system_reg)
1364 {
1365 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1366 | ENCODE (rt.num, 5, 0));
1367 }
1368
1369 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1370
1371 MSR system_reg, xt
1372
1373 SYSTEM_REG is special purpose register to write.
1374 RT is the input register. */
1375
1376 static int
1377 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1378 struct aarch64_register rt)
1379 {
1380 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1381 | ENCODE (rt.num, 5, 0));
1382 }
1383
1384 /* Write a SEVL instruction into *BUF.
1385
1386 This is a hint instruction telling the hardware to trigger an event. */
1387
1388 static int
1389 emit_sevl (uint32_t *buf)
1390 {
1391 return aarch64_emit_insn (buf, SEVL);
1392 }
1393
1394 /* Write a WFE instruction into *BUF.
1395
1396 This is a hint instruction telling the hardware to wait for an event. */
1397
1398 static int
1399 emit_wfe (uint32_t *buf)
1400 {
1401 return aarch64_emit_insn (buf, WFE);
1402 }
1403
1404 /* Write a SBFM instruction into *BUF.
1405
1406 SBFM rd, rn, #immr, #imms
1407
1408 This instruction moves the bits from #immr to #imms into the
1409 destination, sign extending the result.
1410
1411 RD is the destination register.
1412 RN is the source register.
1413 IMMR is the bit number to start at (least significant bit).
1414 IMMS is the bit number to stop at (most significant bit). */
1415
1416 static int
1417 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1418 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1419 {
1420 uint32_t size = ENCODE (rd.is64, 1, 31);
1421 uint32_t n = ENCODE (rd.is64, 1, 22);
1422
1423 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1424 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1425 | ENCODE (rd.num, 5, 0));
1426 }
1427
1428 /* Write a SBFX instruction into *BUF.
1429
1430 SBFX rd, rn, #lsb, #width
1431
1432 This instruction moves #width bits from #lsb into the destination, sign
1433 extending the result. This is an alias for:
1434
1435 SBFM rd, rn, #lsb, #(lsb + width - 1)
1436
1437 RD is the destination register.
1438 RN is the source register.
1439 LSB is the bit number to start at (least significant bit).
1440 WIDTH is the number of bits to move. */
1441
1442 static int
1443 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1444 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1445 {
1446 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1447 }
1448
1449 /* Write a UBFM instruction into *BUF.
1450
1451 UBFM rd, rn, #immr, #imms
1452
1453 This instruction moves the bits from #immr to #imms into the
1454 destination, extending the result with zeros.
1455
1456 RD is the destination register.
1457 RN is the source register.
1458 IMMR is the bit number to start at (least significant bit).
1459 IMMS is the bit number to stop at (most significant bit). */
1460
1461 static int
1462 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1463 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1464 {
1465 uint32_t size = ENCODE (rd.is64, 1, 31);
1466 uint32_t n = ENCODE (rd.is64, 1, 22);
1467
1468 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1469 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1470 | ENCODE (rd.num, 5, 0));
1471 }
1472
1473 /* Write a UBFX instruction into *BUF.
1474
1475 UBFX rd, rn, #lsb, #width
1476
1477 This instruction moves #width bits from #lsb into the destination,
1478 extending the result with zeros. This is an alias for:
1479
1480 UBFM rd, rn, #lsb, #(lsb + width - 1)
1481
1482 RD is the destination register.
1483 RN is the source register.
1484 LSB is the bit number to start at (least significant bit).
1485 WIDTH is the number of bits to move. */
1486
1487 static int
1488 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1489 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1490 {
1491 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1492 }
1493
1494 /* Write a CSINC instruction into *BUF.
1495
1496 CSINC rd, rn, rm, cond
1497
1498 This instruction conditionally increments rn or rm and places the result
1499 in rd. rn is chosen is the condition is true.
1500
1501 RD is the destination register.
1502 RN and RM are the source registers.
1503 COND is the encoded condition. */
1504
1505 static int
1506 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1507 struct aarch64_register rn, struct aarch64_register rm,
1508 unsigned cond)
1509 {
1510 uint32_t size = ENCODE (rd.is64, 1, 31);
1511
1512 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1513 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1514 | ENCODE (rd.num, 5, 0));
1515 }
1516
1517 /* Write a CSET instruction into *BUF.
1518
1519 CSET rd, cond
1520
1521 This instruction conditionally write 1 or 0 in the destination register.
1522 1 is written if the condition is true. This is an alias for:
1523
1524 CSINC rd, xzr, xzr, !cond
1525
1526 Note that the condition needs to be inverted.
1527
1528 RD is the destination register.
1529 RN and RM are the source registers.
1530 COND is the encoded condition. */
1531
1532 static int
1533 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1534 {
1535 /* The least significant bit of the condition needs toggling in order to
1536 invert it. */
1537 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1538 }
1539
1540 /* Write LEN instructions from BUF into the inferior memory at *TO.
1541
1542 Note instructions are always little endian on AArch64, unlike data. */
1543
1544 static void
1545 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1546 {
1547 size_t byte_len = len * sizeof (uint32_t);
1548 #if (__BYTE_ORDER == __BIG_ENDIAN)
1549 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1550 size_t i;
1551
1552 for (i = 0; i < len; i++)
1553 le_buf[i] = htole32 (buf[i]);
1554
1555 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1556
1557 xfree (le_buf);
1558 #else
1559 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1560 #endif
1561
1562 *to += byte_len;
1563 }
1564
1565 /* Sub-class of struct aarch64_insn_data, store information of
1566 instruction relocation for fast tracepoint. Visitor can
1567 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1568 the relocated instructions in buffer pointed by INSN_PTR. */
1569
1570 struct aarch64_insn_relocation_data
1571 {
1572 struct aarch64_insn_data base;
1573
1574 /* The new address the instruction is relocated to. */
1575 CORE_ADDR new_addr;
1576 /* Pointer to the buffer of relocated instruction(s). */
1577 uint32_t *insn_ptr;
1578 };
1579
1580 /* Implementation of aarch64_insn_visitor method "b". */
1581
1582 static void
1583 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1584 struct aarch64_insn_data *data)
1585 {
1586 struct aarch64_insn_relocation_data *insn_reloc
1587 = (struct aarch64_insn_relocation_data *) data;
1588 int64_t new_offset
1589 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1590
1591 if (can_encode_int32 (new_offset, 28))
1592 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1593 }
1594
1595 /* Implementation of aarch64_insn_visitor method "b_cond". */
1596
1597 static void
1598 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1599 struct aarch64_insn_data *data)
1600 {
1601 struct aarch64_insn_relocation_data *insn_reloc
1602 = (struct aarch64_insn_relocation_data *) data;
1603 int64_t new_offset
1604 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1605
1606 if (can_encode_int32 (new_offset, 21))
1607 {
1608 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1609 new_offset);
1610 }
1611 else if (can_encode_int32 (new_offset, 28))
1612 {
1613 /* The offset is out of range for a conditional branch
1614 instruction but not for a unconditional branch. We can use
1615 the following instructions instead:
1616
1617 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1618 B NOT_TAKEN ; Else jump over TAKEN and continue.
1619 TAKEN:
1620 B #(offset - 8)
1621 NOT_TAKEN:
1622
1623 */
1624
1625 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1626 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1627 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1628 }
1629 }
1630
1631 /* Implementation of aarch64_insn_visitor method "cb". */
1632
1633 static void
1634 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1635 const unsigned rn, int is64,
1636 struct aarch64_insn_data *data)
1637 {
1638 struct aarch64_insn_relocation_data *insn_reloc
1639 = (struct aarch64_insn_relocation_data *) data;
1640 int64_t new_offset
1641 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1642
1643 if (can_encode_int32 (new_offset, 21))
1644 {
1645 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1646 aarch64_register (rn, is64), new_offset);
1647 }
1648 else if (can_encode_int32 (new_offset, 28))
1649 {
1650 /* The offset is out of range for a compare and branch
1651 instruction but not for a unconditional branch. We can use
1652 the following instructions instead:
1653
1654 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1655 B NOT_TAKEN ; Else jump over TAKEN and continue.
1656 TAKEN:
1657 B #(offset - 8)
1658 NOT_TAKEN:
1659
1660 */
1661 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1662 aarch64_register (rn, is64), 8);
1663 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1664 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1665 }
1666 }
1667
1668 /* Implementation of aarch64_insn_visitor method "tb". */
1669
1670 static void
1671 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1672 const unsigned rt, unsigned bit,
1673 struct aarch64_insn_data *data)
1674 {
1675 struct aarch64_insn_relocation_data *insn_reloc
1676 = (struct aarch64_insn_relocation_data *) data;
1677 int64_t new_offset
1678 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1679
1680 if (can_encode_int32 (new_offset, 16))
1681 {
1682 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1683 aarch64_register (rt, 1), new_offset);
1684 }
1685 else if (can_encode_int32 (new_offset, 28))
1686 {
1687 /* The offset is out of range for a test bit and branch
1688 instruction but not for a unconditional branch. We can use
1689 the following instructions instead:
1690
1691 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1692 B NOT_TAKEN ; Else jump over TAKEN and continue.
1693 TAKEN:
1694 B #(offset - 8)
1695 NOT_TAKEN:
1696
1697 */
1698 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1699 aarch64_register (rt, 1), 8);
1700 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1701 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1702 new_offset - 8);
1703 }
1704 }
1705
1706 /* Implementation of aarch64_insn_visitor method "adr". */
1707
1708 static void
1709 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1710 const int is_adrp,
1711 struct aarch64_insn_data *data)
1712 {
1713 struct aarch64_insn_relocation_data *insn_reloc
1714 = (struct aarch64_insn_relocation_data *) data;
1715 /* We know exactly the address the ADR{P,} instruction will compute.
1716 We can just write it to the destination register. */
1717 CORE_ADDR address = data->insn_addr + offset;
1718
1719 if (is_adrp)
1720 {
1721 /* Clear the lower 12 bits of the offset to get the 4K page. */
1722 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1723 aarch64_register (rd, 1),
1724 address & ~0xfff);
1725 }
1726 else
1727 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1728 aarch64_register (rd, 1), address);
1729 }
1730
1731 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1732
1733 static void
1734 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1735 const unsigned rt, const int is64,
1736 struct aarch64_insn_data *data)
1737 {
1738 struct aarch64_insn_relocation_data *insn_reloc
1739 = (struct aarch64_insn_relocation_data *) data;
1740 CORE_ADDR address = data->insn_addr + offset;
1741
1742 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1743 aarch64_register (rt, 1), address);
1744
1745 /* We know exactly what address to load from, and what register we
1746 can use:
1747
1748 MOV xd, #(oldloc + offset)
1749 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1750 ...
1751
1752 LDR xd, [xd] ; or LDRSW xd, [xd]
1753
1754 */
1755
1756 if (is_sw)
1757 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1758 aarch64_register (rt, 1),
1759 aarch64_register (rt, 1),
1760 offset_memory_operand (0));
1761 else
1762 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1763 aarch64_register (rt, is64),
1764 aarch64_register (rt, 1),
1765 offset_memory_operand (0));
1766 }
1767
1768 /* Implementation of aarch64_insn_visitor method "others". */
1769
1770 static void
1771 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1772 struct aarch64_insn_data *data)
1773 {
1774 struct aarch64_insn_relocation_data *insn_reloc
1775 = (struct aarch64_insn_relocation_data *) data;
1776
1777 /* The instruction is not PC relative. Just re-emit it at the new
1778 location. */
1779 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1780 }
1781
1782 static const struct aarch64_insn_visitor visitor =
1783 {
1784 aarch64_ftrace_insn_reloc_b,
1785 aarch64_ftrace_insn_reloc_b_cond,
1786 aarch64_ftrace_insn_reloc_cb,
1787 aarch64_ftrace_insn_reloc_tb,
1788 aarch64_ftrace_insn_reloc_adr,
1789 aarch64_ftrace_insn_reloc_ldr_literal,
1790 aarch64_ftrace_insn_reloc_others,
1791 };
1792
1793 /* Implementation of linux_target_ops method
1794 "install_fast_tracepoint_jump_pad". */
1795
1796 static int
1797 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1798 CORE_ADDR tpaddr,
1799 CORE_ADDR collector,
1800 CORE_ADDR lockaddr,
1801 ULONGEST orig_size,
1802 CORE_ADDR *jump_entry,
1803 CORE_ADDR *trampoline,
1804 ULONGEST *trampoline_size,
1805 unsigned char *jjump_pad_insn,
1806 ULONGEST *jjump_pad_insn_size,
1807 CORE_ADDR *adjusted_insn_addr,
1808 CORE_ADDR *adjusted_insn_addr_end,
1809 char *err)
1810 {
1811 uint32_t buf[256];
1812 uint32_t *p = buf;
1813 int64_t offset;
1814 int i;
1815 uint32_t insn;
1816 CORE_ADDR buildaddr = *jump_entry;
1817 struct aarch64_insn_relocation_data insn_data;
1818
1819 /* We need to save the current state on the stack both to restore it
1820 later and to collect register values when the tracepoint is hit.
1821
1822 The saved registers are pushed in a layout that needs to be in sync
1823 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1824 the supply_fast_tracepoint_registers function will fill in the
1825 register cache from a pointer to saved registers on the stack we build
1826 here.
1827
1828 For simplicity, we set the size of each cell on the stack to 16 bytes.
1829 This way one cell can hold any register type, from system registers
1830 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1831 has to be 16 bytes aligned anyway.
1832
1833 Note that the CPSR register does not exist on AArch64. Instead we
1834 can access system bits describing the process state with the
1835 MRS/MSR instructions, namely the condition flags. We save them as
1836 if they are part of a CPSR register because that's how GDB
1837 interprets these system bits. At the moment, only the condition
1838 flags are saved in CPSR (NZCV).
1839
1840 Stack layout, each cell is 16 bytes (descending):
1841
1842 High *-------- SIMD&FP registers from 31 down to 0. --------*
1843 | q31 |
1844 . .
1845 . . 32 cells
1846 . .
1847 | q0 |
1848 *---- General purpose registers from 30 down to 0. ----*
1849 | x30 |
1850 . .
1851 . . 31 cells
1852 . .
1853 | x0 |
1854 *------------- Special purpose registers. -------------*
1855 | SP |
1856 | PC |
1857 | CPSR (NZCV) | 5 cells
1858 | FPSR |
1859 | FPCR | <- SP + 16
1860 *------------- collecting_t object --------------------*
1861 | TPIDR_EL0 | struct tracepoint * |
1862 Low *------------------------------------------------------*
1863
1864 After this stack is set up, we issue a call to the collector, passing
1865 it the saved registers at (SP + 16). */
1866
1867 /* Push SIMD&FP registers on the stack:
1868
1869 SUB sp, sp, #(32 * 16)
1870
1871 STP q30, q31, [sp, #(30 * 16)]
1872 ...
1873 STP q0, q1, [sp]
1874
1875 */
1876 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1877 for (i = 30; i >= 0; i -= 2)
1878 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1879
1880 /* Push general puspose registers on the stack. Note that we do not need
1881 to push x31 as it represents the xzr register and not the stack
1882 pointer in a STR instruction.
1883
1884 SUB sp, sp, #(31 * 16)
1885
1886 STR x30, [sp, #(30 * 16)]
1887 ...
1888 STR x0, [sp]
1889
1890 */
1891 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1892 for (i = 30; i >= 0; i -= 1)
1893 p += emit_str (p, aarch64_register (i, 1), sp,
1894 offset_memory_operand (i * 16));
1895
1896 /* Make space for 5 more cells.
1897
1898 SUB sp, sp, #(5 * 16)
1899
1900 */
1901 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1902
1903
1904 /* Save SP:
1905
1906 ADD x4, sp, #((32 + 31 + 5) * 16)
1907 STR x4, [sp, #(4 * 16)]
1908
1909 */
1910 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1911 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1912
1913 /* Save PC (tracepoint address):
1914
1915 MOV x3, #(tpaddr)
1916 ...
1917
1918 STR x3, [sp, #(3 * 16)]
1919
1920 */
1921
1922 p += emit_mov_addr (p, x3, tpaddr);
1923 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1924
1925 /* Save CPSR (NZCV), FPSR and FPCR:
1926
1927 MRS x2, nzcv
1928 MRS x1, fpsr
1929 MRS x0, fpcr
1930
1931 STR x2, [sp, #(2 * 16)]
1932 STR x1, [sp, #(1 * 16)]
1933 STR x0, [sp, #(0 * 16)]
1934
1935 */
1936 p += emit_mrs (p, x2, NZCV);
1937 p += emit_mrs (p, x1, FPSR);
1938 p += emit_mrs (p, x0, FPCR);
1939 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1940 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1941 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1942
1943 /* Push the collecting_t object. It consist of the address of the
1944 tracepoint and an ID for the current thread. We get the latter by
1945 reading the tpidr_el0 system register. It corresponds to the
1946 NT_ARM_TLS register accessible with ptrace.
1947
1948 MOV x0, #(tpoint)
1949 ...
1950
1951 MRS x1, tpidr_el0
1952
1953 STP x0, x1, [sp, #-16]!
1954
1955 */
1956
1957 p += emit_mov_addr (p, x0, tpoint);
1958 p += emit_mrs (p, x1, TPIDR_EL0);
1959 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1960
1961 /* Spin-lock:
1962
1963 The shared memory for the lock is at lockaddr. It will hold zero
1964 if no-one is holding the lock, otherwise it contains the address of
1965 the collecting_t object on the stack of the thread which acquired it.
1966
1967 At this stage, the stack pointer points to this thread's collecting_t
1968 object.
1969
1970 We use the following registers:
1971 - x0: Address of the lock.
1972 - x1: Pointer to collecting_t object.
1973 - x2: Scratch register.
1974
1975 MOV x0, #(lockaddr)
1976 ...
1977 MOV x1, sp
1978
1979 ; Trigger an event local to this core. So the following WFE
1980 ; instruction is ignored.
1981 SEVL
1982 again:
1983 ; Wait for an event. The event is triggered by either the SEVL
1984 ; or STLR instructions (store release).
1985 WFE
1986
1987 ; Atomically read at lockaddr. This marks the memory location as
1988 ; exclusive. This instruction also has memory constraints which
1989 ; make sure all previous data reads and writes are done before
1990 ; executing it.
1991 LDAXR x2, [x0]
1992
1993 ; Try again if another thread holds the lock.
1994 CBNZ x2, again
1995
1996 ; We can lock it! Write the address of the collecting_t object.
1997 ; This instruction will fail if the memory location is not marked
1998 ; as exclusive anymore. If it succeeds, it will remove the
1999 ; exclusive mark on the memory location. This way, if another
2000 ; thread executes this instruction before us, we will fail and try
2001 ; all over again.
2002 STXR w2, x1, [x0]
2003 CBNZ w2, again
2004
2005 */
2006
2007 p += emit_mov_addr (p, x0, lockaddr);
2008 p += emit_mov (p, x1, register_operand (sp));
2009
2010 p += emit_sevl (p);
2011 p += emit_wfe (p);
2012 p += emit_ldaxr (p, x2, x0);
2013 p += emit_cb (p, 1, w2, -2 * 4);
2014 p += emit_stxr (p, w2, x1, x0);
2015 p += emit_cb (p, 1, x2, -4 * 4);
2016
2017 /* Call collector (struct tracepoint *, unsigned char *):
2018
2019 MOV x0, #(tpoint)
2020 ...
2021
2022 ; Saved registers start after the collecting_t object.
2023 ADD x1, sp, #16
2024
2025 ; We use an intra-procedure-call scratch register.
2026 MOV ip0, #(collector)
2027 ...
2028
2029 ; And call back to C!
2030 BLR ip0
2031
2032 */
2033
2034 p += emit_mov_addr (p, x0, tpoint);
2035 p += emit_add (p, x1, sp, immediate_operand (16));
2036
2037 p += emit_mov_addr (p, ip0, collector);
2038 p += emit_blr (p, ip0);
2039
2040 /* Release the lock.
2041
2042 MOV x0, #(lockaddr)
2043 ...
2044
2045 ; This instruction is a normal store with memory ordering
2046 ; constraints. Thanks to this we do not have to put a data
2047 ; barrier instruction to make sure all data read and writes are done
2048 ; before this instruction is executed. Furthermore, this instrucion
2049 ; will trigger an event, letting other threads know they can grab
2050 ; the lock.
2051 STLR xzr, [x0]
2052
2053 */
2054 p += emit_mov_addr (p, x0, lockaddr);
2055 p += emit_stlr (p, xzr, x0);
2056
2057 /* Free collecting_t object:
2058
2059 ADD sp, sp, #16
2060
2061 */
2062 p += emit_add (p, sp, sp, immediate_operand (16));
2063
2064 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2065 registers from the stack.
2066
2067 LDR x2, [sp, #(2 * 16)]
2068 LDR x1, [sp, #(1 * 16)]
2069 LDR x0, [sp, #(0 * 16)]
2070
2071 MSR NZCV, x2
2072 MSR FPSR, x1
2073 MSR FPCR, x0
2074
2075 ADD sp, sp #(5 * 16)
2076
2077 */
2078 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2079 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2080 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2081 p += emit_msr (p, NZCV, x2);
2082 p += emit_msr (p, FPSR, x1);
2083 p += emit_msr (p, FPCR, x0);
2084
2085 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2086
2087 /* Pop general purpose registers:
2088
2089 LDR x0, [sp]
2090 ...
2091 LDR x30, [sp, #(30 * 16)]
2092
2093 ADD sp, sp, #(31 * 16)
2094
2095 */
2096 for (i = 0; i <= 30; i += 1)
2097 p += emit_ldr (p, aarch64_register (i, 1), sp,
2098 offset_memory_operand (i * 16));
2099 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2100
2101 /* Pop SIMD&FP registers:
2102
2103 LDP q0, q1, [sp]
2104 ...
2105 LDP q30, q31, [sp, #(30 * 16)]
2106
2107 ADD sp, sp, #(32 * 16)
2108
2109 */
2110 for (i = 0; i <= 30; i += 2)
2111 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2112 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2113
2114 /* Write the code into the inferior memory. */
2115 append_insns (&buildaddr, p - buf, buf);
2116
2117 /* Now emit the relocated instruction. */
2118 *adjusted_insn_addr = buildaddr;
2119 target_read_uint32 (tpaddr, &insn);
2120
2121 insn_data.base.insn_addr = tpaddr;
2122 insn_data.new_addr = buildaddr;
2123 insn_data.insn_ptr = buf;
2124
2125 aarch64_relocate_instruction (insn, &visitor,
2126 (struct aarch64_insn_data *) &insn_data);
2127
2128 /* We may not have been able to relocate the instruction. */
2129 if (insn_data.insn_ptr == buf)
2130 {
2131 sprintf (err,
2132 "E.Could not relocate instruction from %s to %s.",
2133 core_addr_to_string_nz (tpaddr),
2134 core_addr_to_string_nz (buildaddr));
2135 return 1;
2136 }
2137 else
2138 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2139 *adjusted_insn_addr_end = buildaddr;
2140
2141 /* Go back to the start of the buffer. */
2142 p = buf;
2143
2144 /* Emit a branch back from the jump pad. */
2145 offset = (tpaddr + orig_size - buildaddr);
2146 if (!can_encode_int32 (offset, 28))
2147 {
2148 sprintf (err,
2149 "E.Jump back from jump pad too far from tracepoint "
2150 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2151 offset);
2152 return 1;
2153 }
2154
2155 p += emit_b (p, 0, offset);
2156 append_insns (&buildaddr, p - buf, buf);
2157
2158 /* Give the caller a branch instruction into the jump pad. */
2159 offset = (*jump_entry - tpaddr);
2160 if (!can_encode_int32 (offset, 28))
2161 {
2162 sprintf (err,
2163 "E.Jump pad too far from tracepoint "
2164 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2165 offset);
2166 return 1;
2167 }
2168
2169 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2170 *jjump_pad_insn_size = 4;
2171
2172 /* Return the end address of our pad. */
2173 *jump_entry = buildaddr;
2174
2175 return 0;
2176 }
2177
2178 /* Helper function writing LEN instructions from START into
2179 current_insn_ptr. */
2180
2181 static void
2182 emit_ops_insns (const uint32_t *start, int len)
2183 {
2184 CORE_ADDR buildaddr = current_insn_ptr;
2185
2186 if (debug_threads)
2187 debug_printf ("Adding %d instrucions at %s\n",
2188 len, paddress (buildaddr));
2189
2190 append_insns (&buildaddr, len, start);
2191 current_insn_ptr = buildaddr;
2192 }
2193
2194 /* Pop a register from the stack. */
2195
2196 static int
2197 emit_pop (uint32_t *buf, struct aarch64_register rt)
2198 {
2199 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2200 }
2201
2202 /* Push a register on the stack. */
2203
2204 static int
2205 emit_push (uint32_t *buf, struct aarch64_register rt)
2206 {
2207 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2208 }
2209
2210 /* Implementation of emit_ops method "emit_prologue". */
2211
2212 static void
2213 aarch64_emit_prologue (void)
2214 {
2215 uint32_t buf[16];
2216 uint32_t *p = buf;
2217
2218 /* This function emit a prologue for the following function prototype:
2219
2220 enum eval_result_type f (unsigned char *regs,
2221 ULONGEST *value);
2222
2223 The first argument is a buffer of raw registers. The second
2224 argument is the result of
2225 evaluating the expression, which will be set to whatever is on top of
2226 the stack at the end.
2227
2228 The stack set up by the prologue is as such:
2229
2230 High *------------------------------------------------------*
2231 | LR |
2232 | FP | <- FP
2233 | x1 (ULONGEST *value) |
2234 | x0 (unsigned char *regs) |
2235 Low *------------------------------------------------------*
2236
2237 As we are implementing a stack machine, each opcode can expand the
2238 stack so we never know how far we are from the data saved by this
2239 prologue. In order to be able refer to value and regs later, we save
2240 the current stack pointer in the frame pointer. This way, it is not
2241 clobbered when calling C functions.
2242
2243 Finally, throughtout every operation, we are using register x0 as the
2244 top of the stack, and x1 as a scratch register. */
2245
2246 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2247 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2248 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2249
2250 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2251
2252
2253 emit_ops_insns (buf, p - buf);
2254 }
2255
2256 /* Implementation of emit_ops method "emit_epilogue". */
2257
2258 static void
2259 aarch64_emit_epilogue (void)
2260 {
2261 uint32_t buf[16];
2262 uint32_t *p = buf;
2263
2264 /* Store the result of the expression (x0) in *value. */
2265 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2266 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2267 p += emit_str (p, x0, x1, offset_memory_operand (0));
2268
2269 /* Restore the previous state. */
2270 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2271 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2272
2273 /* Return expr_eval_no_error. */
2274 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2275 p += emit_ret (p, lr);
2276
2277 emit_ops_insns (buf, p - buf);
2278 }
2279
2280 /* Implementation of emit_ops method "emit_add". */
2281
2282 static void
2283 aarch64_emit_add (void)
2284 {
2285 uint32_t buf[16];
2286 uint32_t *p = buf;
2287
2288 p += emit_pop (p, x1);
2289 p += emit_add (p, x0, x1, register_operand (x0));
2290
2291 emit_ops_insns (buf, p - buf);
2292 }
2293
2294 /* Implementation of emit_ops method "emit_sub". */
2295
2296 static void
2297 aarch64_emit_sub (void)
2298 {
2299 uint32_t buf[16];
2300 uint32_t *p = buf;
2301
2302 p += emit_pop (p, x1);
2303 p += emit_sub (p, x0, x1, register_operand (x0));
2304
2305 emit_ops_insns (buf, p - buf);
2306 }
2307
2308 /* Implementation of emit_ops method "emit_mul". */
2309
2310 static void
2311 aarch64_emit_mul (void)
2312 {
2313 uint32_t buf[16];
2314 uint32_t *p = buf;
2315
2316 p += emit_pop (p, x1);
2317 p += emit_mul (p, x0, x1, x0);
2318
2319 emit_ops_insns (buf, p - buf);
2320 }
2321
2322 /* Implementation of emit_ops method "emit_lsh". */
2323
2324 static void
2325 aarch64_emit_lsh (void)
2326 {
2327 uint32_t buf[16];
2328 uint32_t *p = buf;
2329
2330 p += emit_pop (p, x1);
2331 p += emit_lslv (p, x0, x1, x0);
2332
2333 emit_ops_insns (buf, p - buf);
2334 }
2335
2336 /* Implementation of emit_ops method "emit_rsh_signed". */
2337
2338 static void
2339 aarch64_emit_rsh_signed (void)
2340 {
2341 uint32_t buf[16];
2342 uint32_t *p = buf;
2343
2344 p += emit_pop (p, x1);
2345 p += emit_asrv (p, x0, x1, x0);
2346
2347 emit_ops_insns (buf, p - buf);
2348 }
2349
2350 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2351
2352 static void
2353 aarch64_emit_rsh_unsigned (void)
2354 {
2355 uint32_t buf[16];
2356 uint32_t *p = buf;
2357
2358 p += emit_pop (p, x1);
2359 p += emit_lsrv (p, x0, x1, x0);
2360
2361 emit_ops_insns (buf, p - buf);
2362 }
2363
2364 /* Implementation of emit_ops method "emit_ext". */
2365
2366 static void
2367 aarch64_emit_ext (int arg)
2368 {
2369 uint32_t buf[16];
2370 uint32_t *p = buf;
2371
2372 p += emit_sbfx (p, x0, x0, 0, arg);
2373
2374 emit_ops_insns (buf, p - buf);
2375 }
2376
2377 /* Implementation of emit_ops method "emit_log_not". */
2378
2379 static void
2380 aarch64_emit_log_not (void)
2381 {
2382 uint32_t buf[16];
2383 uint32_t *p = buf;
2384
2385 /* If the top of the stack is 0, replace it with 1. Else replace it with
2386 0. */
2387
2388 p += emit_cmp (p, x0, immediate_operand (0));
2389 p += emit_cset (p, x0, EQ);
2390
2391 emit_ops_insns (buf, p - buf);
2392 }
2393
2394 /* Implementation of emit_ops method "emit_bit_and". */
2395
2396 static void
2397 aarch64_emit_bit_and (void)
2398 {
2399 uint32_t buf[16];
2400 uint32_t *p = buf;
2401
2402 p += emit_pop (p, x1);
2403 p += emit_and (p, x0, x0, x1);
2404
2405 emit_ops_insns (buf, p - buf);
2406 }
2407
2408 /* Implementation of emit_ops method "emit_bit_or". */
2409
2410 static void
2411 aarch64_emit_bit_or (void)
2412 {
2413 uint32_t buf[16];
2414 uint32_t *p = buf;
2415
2416 p += emit_pop (p, x1);
2417 p += emit_orr (p, x0, x0, x1);
2418
2419 emit_ops_insns (buf, p - buf);
2420 }
2421
2422 /* Implementation of emit_ops method "emit_bit_xor". */
2423
2424 static void
2425 aarch64_emit_bit_xor (void)
2426 {
2427 uint32_t buf[16];
2428 uint32_t *p = buf;
2429
2430 p += emit_pop (p, x1);
2431 p += emit_eor (p, x0, x0, x1);
2432
2433 emit_ops_insns (buf, p - buf);
2434 }
2435
2436 /* Implementation of emit_ops method "emit_bit_not". */
2437
2438 static void
2439 aarch64_emit_bit_not (void)
2440 {
2441 uint32_t buf[16];
2442 uint32_t *p = buf;
2443
2444 p += emit_mvn (p, x0, x0);
2445
2446 emit_ops_insns (buf, p - buf);
2447 }
2448
2449 /* Implementation of emit_ops method "emit_equal". */
2450
2451 static void
2452 aarch64_emit_equal (void)
2453 {
2454 uint32_t buf[16];
2455 uint32_t *p = buf;
2456
2457 p += emit_pop (p, x1);
2458 p += emit_cmp (p, x0, register_operand (x1));
2459 p += emit_cset (p, x0, EQ);
2460
2461 emit_ops_insns (buf, p - buf);
2462 }
2463
2464 /* Implementation of emit_ops method "emit_less_signed". */
2465
2466 static void
2467 aarch64_emit_less_signed (void)
2468 {
2469 uint32_t buf[16];
2470 uint32_t *p = buf;
2471
2472 p += emit_pop (p, x1);
2473 p += emit_cmp (p, x1, register_operand (x0));
2474 p += emit_cset (p, x0, LT);
2475
2476 emit_ops_insns (buf, p - buf);
2477 }
2478
2479 /* Implementation of emit_ops method "emit_less_unsigned". */
2480
2481 static void
2482 aarch64_emit_less_unsigned (void)
2483 {
2484 uint32_t buf[16];
2485 uint32_t *p = buf;
2486
2487 p += emit_pop (p, x1);
2488 p += emit_cmp (p, x1, register_operand (x0));
2489 p += emit_cset (p, x0, LO);
2490
2491 emit_ops_insns (buf, p - buf);
2492 }
2493
2494 /* Implementation of emit_ops method "emit_ref". */
2495
2496 static void
2497 aarch64_emit_ref (int size)
2498 {
2499 uint32_t buf[16];
2500 uint32_t *p = buf;
2501
2502 switch (size)
2503 {
2504 case 1:
2505 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2506 break;
2507 case 2:
2508 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2509 break;
2510 case 4:
2511 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2512 break;
2513 case 8:
2514 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2515 break;
2516 default:
2517 /* Unknown size, bail on compilation. */
2518 emit_error = 1;
2519 break;
2520 }
2521
2522 emit_ops_insns (buf, p - buf);
2523 }
2524
2525 /* Implementation of emit_ops method "emit_if_goto". */
2526
2527 static void
2528 aarch64_emit_if_goto (int *offset_p, int *size_p)
2529 {
2530 uint32_t buf[16];
2531 uint32_t *p = buf;
2532
2533 /* The Z flag is set or cleared here. */
2534 p += emit_cmp (p, x0, immediate_operand (0));
2535 /* This instruction must not change the Z flag. */
2536 p += emit_pop (p, x0);
2537 /* Branch over the next instruction if x0 == 0. */
2538 p += emit_bcond (p, EQ, 8);
2539
2540 /* The NOP instruction will be patched with an unconditional branch. */
2541 if (offset_p)
2542 *offset_p = (p - buf) * 4;
2543 if (size_p)
2544 *size_p = 4;
2545 p += emit_nop (p);
2546
2547 emit_ops_insns (buf, p - buf);
2548 }
2549
2550 /* Implementation of emit_ops method "emit_goto". */
2551
2552 static void
2553 aarch64_emit_goto (int *offset_p, int *size_p)
2554 {
2555 uint32_t buf[16];
2556 uint32_t *p = buf;
2557
2558 /* The NOP instruction will be patched with an unconditional branch. */
2559 if (offset_p)
2560 *offset_p = 0;
2561 if (size_p)
2562 *size_p = 4;
2563 p += emit_nop (p);
2564
2565 emit_ops_insns (buf, p - buf);
2566 }
2567
2568 /* Implementation of emit_ops method "write_goto_address". */
2569
2570 void
2571 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2572 {
2573 uint32_t insn;
2574
2575 emit_b (&insn, 0, to - from);
2576 append_insns (&from, 1, &insn);
2577 }
2578
2579 /* Implementation of emit_ops method "emit_const". */
2580
2581 static void
2582 aarch64_emit_const (LONGEST num)
2583 {
2584 uint32_t buf[16];
2585 uint32_t *p = buf;
2586
2587 p += emit_mov_addr (p, x0, num);
2588
2589 emit_ops_insns (buf, p - buf);
2590 }
2591
2592 /* Implementation of emit_ops method "emit_call". */
2593
2594 static void
2595 aarch64_emit_call (CORE_ADDR fn)
2596 {
2597 uint32_t buf[16];
2598 uint32_t *p = buf;
2599
2600 p += emit_mov_addr (p, ip0, fn);
2601 p += emit_blr (p, ip0);
2602
2603 emit_ops_insns (buf, p - buf);
2604 }
2605
2606 /* Implementation of emit_ops method "emit_reg". */
2607
2608 static void
2609 aarch64_emit_reg (int reg)
2610 {
2611 uint32_t buf[16];
2612 uint32_t *p = buf;
2613
2614 /* Set x0 to unsigned char *regs. */
2615 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2616 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2617 p += emit_mov (p, x1, immediate_operand (reg));
2618
2619 emit_ops_insns (buf, p - buf);
2620
2621 aarch64_emit_call (get_raw_reg_func_addr ());
2622 }
2623
2624 /* Implementation of emit_ops method "emit_pop". */
2625
2626 static void
2627 aarch64_emit_pop (void)
2628 {
2629 uint32_t buf[16];
2630 uint32_t *p = buf;
2631
2632 p += emit_pop (p, x0);
2633
2634 emit_ops_insns (buf, p - buf);
2635 }
2636
2637 /* Implementation of emit_ops method "emit_stack_flush". */
2638
2639 static void
2640 aarch64_emit_stack_flush (void)
2641 {
2642 uint32_t buf[16];
2643 uint32_t *p = buf;
2644
2645 p += emit_push (p, x0);
2646
2647 emit_ops_insns (buf, p - buf);
2648 }
2649
2650 /* Implementation of emit_ops method "emit_zero_ext". */
2651
2652 static void
2653 aarch64_emit_zero_ext (int arg)
2654 {
2655 uint32_t buf[16];
2656 uint32_t *p = buf;
2657
2658 p += emit_ubfx (p, x0, x0, 0, arg);
2659
2660 emit_ops_insns (buf, p - buf);
2661 }
2662
2663 /* Implementation of emit_ops method "emit_swap". */
2664
2665 static void
2666 aarch64_emit_swap (void)
2667 {
2668 uint32_t buf[16];
2669 uint32_t *p = buf;
2670
2671 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2672 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2673 p += emit_mov (p, x0, register_operand (x1));
2674
2675 emit_ops_insns (buf, p - buf);
2676 }
2677
2678 /* Implementation of emit_ops method "emit_stack_adjust". */
2679
2680 static void
2681 aarch64_emit_stack_adjust (int n)
2682 {
2683 /* This is not needed with our design. */
2684 uint32_t buf[16];
2685 uint32_t *p = buf;
2686
2687 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2688
2689 emit_ops_insns (buf, p - buf);
2690 }
2691
2692 /* Implementation of emit_ops method "emit_int_call_1". */
2693
2694 static void
2695 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2696 {
2697 uint32_t buf[16];
2698 uint32_t *p = buf;
2699
2700 p += emit_mov (p, x0, immediate_operand (arg1));
2701
2702 emit_ops_insns (buf, p - buf);
2703
2704 aarch64_emit_call (fn);
2705 }
2706
2707 /* Implementation of emit_ops method "emit_void_call_2". */
2708
2709 static void
2710 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2711 {
2712 uint32_t buf[16];
2713 uint32_t *p = buf;
2714
2715 /* Push x0 on the stack. */
2716 aarch64_emit_stack_flush ();
2717
2718 /* Setup arguments for the function call:
2719
2720 x0: arg1
2721 x1: top of the stack
2722
2723 MOV x1, x0
2724 MOV x0, #arg1 */
2725
2726 p += emit_mov (p, x1, register_operand (x0));
2727 p += emit_mov (p, x0, immediate_operand (arg1));
2728
2729 emit_ops_insns (buf, p - buf);
2730
2731 aarch64_emit_call (fn);
2732
2733 /* Restore x0. */
2734 aarch64_emit_pop ();
2735 }
2736
2737 /* Implementation of emit_ops method "emit_eq_goto". */
2738
2739 static void
2740 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2741 {
2742 uint32_t buf[16];
2743 uint32_t *p = buf;
2744
2745 p += emit_pop (p, x1);
2746 p += emit_cmp (p, x1, register_operand (x0));
2747 /* Branch over the next instruction if x0 != x1. */
2748 p += emit_bcond (p, NE, 8);
2749 /* The NOP instruction will be patched with an unconditional branch. */
2750 if (offset_p)
2751 *offset_p = (p - buf) * 4;
2752 if (size_p)
2753 *size_p = 4;
2754 p += emit_nop (p);
2755
2756 emit_ops_insns (buf, p - buf);
2757 }
2758
2759 /* Implementation of emit_ops method "emit_ne_goto". */
2760
2761 static void
2762 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2763 {
2764 uint32_t buf[16];
2765 uint32_t *p = buf;
2766
2767 p += emit_pop (p, x1);
2768 p += emit_cmp (p, x1, register_operand (x0));
2769 /* Branch over the next instruction if x0 == x1. */
2770 p += emit_bcond (p, EQ, 8);
2771 /* The NOP instruction will be patched with an unconditional branch. */
2772 if (offset_p)
2773 *offset_p = (p - buf) * 4;
2774 if (size_p)
2775 *size_p = 4;
2776 p += emit_nop (p);
2777
2778 emit_ops_insns (buf, p - buf);
2779 }
2780
2781 /* Implementation of emit_ops method "emit_lt_goto". */
2782
2783 static void
2784 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2785 {
2786 uint32_t buf[16];
2787 uint32_t *p = buf;
2788
2789 p += emit_pop (p, x1);
2790 p += emit_cmp (p, x1, register_operand (x0));
2791 /* Branch over the next instruction if x0 >= x1. */
2792 p += emit_bcond (p, GE, 8);
2793 /* The NOP instruction will be patched with an unconditional branch. */
2794 if (offset_p)
2795 *offset_p = (p - buf) * 4;
2796 if (size_p)
2797 *size_p = 4;
2798 p += emit_nop (p);
2799
2800 emit_ops_insns (buf, p - buf);
2801 }
2802
2803 /* Implementation of emit_ops method "emit_le_goto". */
2804
2805 static void
2806 aarch64_emit_le_goto (int *offset_p, int *size_p)
2807 {
2808 uint32_t buf[16];
2809 uint32_t *p = buf;
2810
2811 p += emit_pop (p, x1);
2812 p += emit_cmp (p, x1, register_operand (x0));
2813 /* Branch over the next instruction if x0 > x1. */
2814 p += emit_bcond (p, GT, 8);
2815 /* The NOP instruction will be patched with an unconditional branch. */
2816 if (offset_p)
2817 *offset_p = (p - buf) * 4;
2818 if (size_p)
2819 *size_p = 4;
2820 p += emit_nop (p);
2821
2822 emit_ops_insns (buf, p - buf);
2823 }
2824
2825 /* Implementation of emit_ops method "emit_gt_goto". */
2826
2827 static void
2828 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2829 {
2830 uint32_t buf[16];
2831 uint32_t *p = buf;
2832
2833 p += emit_pop (p, x1);
2834 p += emit_cmp (p, x1, register_operand (x0));
2835 /* Branch over the next instruction if x0 <= x1. */
2836 p += emit_bcond (p, LE, 8);
2837 /* The NOP instruction will be patched with an unconditional branch. */
2838 if (offset_p)
2839 *offset_p = (p - buf) * 4;
2840 if (size_p)
2841 *size_p = 4;
2842 p += emit_nop (p);
2843
2844 emit_ops_insns (buf, p - buf);
2845 }
2846
2847 /* Implementation of emit_ops method "emit_ge_got". */
2848
2849 static void
2850 aarch64_emit_ge_got (int *offset_p, int *size_p)
2851 {
2852 uint32_t buf[16];
2853 uint32_t *p = buf;
2854
2855 p += emit_pop (p, x1);
2856 p += emit_cmp (p, x1, register_operand (x0));
2857 /* Branch over the next instruction if x0 <= x1. */
2858 p += emit_bcond (p, LT, 8);
2859 /* The NOP instruction will be patched with an unconditional branch. */
2860 if (offset_p)
2861 *offset_p = (p - buf) * 4;
2862 if (size_p)
2863 *size_p = 4;
2864 p += emit_nop (p);
2865
2866 emit_ops_insns (buf, p - buf);
2867 }
2868
2869 static struct emit_ops aarch64_emit_ops_impl =
2870 {
2871 aarch64_emit_prologue,
2872 aarch64_emit_epilogue,
2873 aarch64_emit_add,
2874 aarch64_emit_sub,
2875 aarch64_emit_mul,
2876 aarch64_emit_lsh,
2877 aarch64_emit_rsh_signed,
2878 aarch64_emit_rsh_unsigned,
2879 aarch64_emit_ext,
2880 aarch64_emit_log_not,
2881 aarch64_emit_bit_and,
2882 aarch64_emit_bit_or,
2883 aarch64_emit_bit_xor,
2884 aarch64_emit_bit_not,
2885 aarch64_emit_equal,
2886 aarch64_emit_less_signed,
2887 aarch64_emit_less_unsigned,
2888 aarch64_emit_ref,
2889 aarch64_emit_if_goto,
2890 aarch64_emit_goto,
2891 aarch64_write_goto_address,
2892 aarch64_emit_const,
2893 aarch64_emit_call,
2894 aarch64_emit_reg,
2895 aarch64_emit_pop,
2896 aarch64_emit_stack_flush,
2897 aarch64_emit_zero_ext,
2898 aarch64_emit_swap,
2899 aarch64_emit_stack_adjust,
2900 aarch64_emit_int_call_1,
2901 aarch64_emit_void_call_2,
2902 aarch64_emit_eq_goto,
2903 aarch64_emit_ne_goto,
2904 aarch64_emit_lt_goto,
2905 aarch64_emit_le_goto,
2906 aarch64_emit_gt_goto,
2907 aarch64_emit_ge_got,
2908 };
2909
2910 /* Implementation of linux_target_ops method "emit_ops". */
2911
2912 static struct emit_ops *
2913 aarch64_emit_ops (void)
2914 {
2915 return &aarch64_emit_ops_impl;
2916 }
2917
2918 /* Implementation of linux_target_ops method
2919 "get_min_fast_tracepoint_insn_len". */
2920
2921 static int
2922 aarch64_get_min_fast_tracepoint_insn_len (void)
2923 {
2924 return 4;
2925 }
2926
2927 /* Implementation of linux_target_ops method "supports_range_stepping". */
2928
2929 static int
2930 aarch64_supports_range_stepping (void)
2931 {
2932 return 1;
2933 }
2934
2935 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2936
2937 static const gdb_byte *
2938 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2939 {
2940 if (is_64bit_tdesc ())
2941 {
2942 *size = aarch64_breakpoint_len;
2943 return aarch64_breakpoint;
2944 }
2945 else
2946 return arm_sw_breakpoint_from_kind (kind, size);
2947 }
2948
2949 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2950
2951 static int
2952 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2953 {
2954 if (is_64bit_tdesc ())
2955 return aarch64_breakpoint_len;
2956 else
2957 return arm_breakpoint_kind_from_pc (pcptr);
2958 }
2959
2960 /* Implementation of the linux_target_ops method
2961 "breakpoint_kind_from_current_state". */
2962
2963 static int
2964 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2965 {
2966 if (is_64bit_tdesc ())
2967 return aarch64_breakpoint_len;
2968 else
2969 return arm_breakpoint_kind_from_current_state (pcptr);
2970 }
2971
2972 /* Support for hardware single step. */
2973
2974 static int
2975 aarch64_supports_hardware_single_step (void)
2976 {
2977 return 1;
2978 }
2979
2980 struct linux_target_ops the_low_target =
2981 {
2982 aarch64_arch_setup,
2983 aarch64_regs_info,
2984 aarch64_cannot_fetch_register,
2985 aarch64_cannot_store_register,
2986 NULL, /* fetch_register */
2987 aarch64_get_pc,
2988 aarch64_set_pc,
2989 aarch64_breakpoint_kind_from_pc,
2990 aarch64_sw_breakpoint_from_kind,
2991 NULL, /* get_next_pcs */
2992 0, /* decr_pc_after_break */
2993 aarch64_breakpoint_at,
2994 aarch64_supports_z_point_type,
2995 aarch64_insert_point,
2996 aarch64_remove_point,
2997 aarch64_stopped_by_watchpoint,
2998 aarch64_stopped_data_address,
2999 NULL, /* collect_ptrace_register */
3000 NULL, /* supply_ptrace_register */
3001 aarch64_linux_siginfo_fixup,
3002 aarch64_linux_new_process,
3003 aarch64_linux_delete_process,
3004 aarch64_linux_new_thread,
3005 aarch64_linux_delete_thread,
3006 aarch64_linux_new_fork,
3007 aarch64_linux_prepare_to_resume,
3008 NULL, /* process_qsupported */
3009 aarch64_supports_tracepoints,
3010 aarch64_get_thread_area,
3011 aarch64_install_fast_tracepoint_jump_pad,
3012 aarch64_emit_ops,
3013 aarch64_get_min_fast_tracepoint_insn_len,
3014 aarch64_supports_range_stepping,
3015 aarch64_breakpoint_kind_from_current_state,
3016 aarch64_supports_hardware_single_step,
3017 aarch64_get_syscall_trapinfo,
3018 };
3019
3020 void
3021 initialize_low_arch (void)
3022 {
3023 initialize_low_arch_aarch32 ();
3024
3025 initialize_regsets_info (&aarch64_regsets_info);
3026
3027 #if GDB_SELF_TEST
3028 initialize_low_tdesc ();
3029 #endif
3030 }
This page took 0.101283 seconds and 4 git commands to generate.