/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Architecture-specific hardware debug registers state.  */
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
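
/* Editor's illustrative sketch -- not part of the upstream file.  The
   regmap above turns a GDB register number into a byte offset inside
   the ptrace `struct user' register block; e.g. GDB register 0 (%eax)
   lives at offset RAX * 8 when a 32-bit inferior runs on a 64-bit
   host.  A minimal accessor built on that invariant could look like
   this (kept under #if 0 so it is never compiled):  */
#if 0
static long
example_read_i386_reg (const char *regblock, int regno)
{
  long val;

  gdb_assert (regno >= 0 && regno < I386_NUM_REGS);
  memcpy (&val, regblock + i386_regmap[regno], sizeof (val));
  return val;
}
#endif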
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,			/* fs_base, gs_base */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
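
/* Editor's note (illustrative, not upstream text): the shift by
   REG_THREAD_AREA above is plain segment-selector arithmetic.  A %gs
   value of 0x33, say, selects GDT entry 0x33 >> 3 == 6; the low two
   bits of a selector are the requested privilege level and bit 2 the
   local/global table indicator, so only bits 3 and up name the
   descriptor slot that PTRACE_GET_THREAD_AREA expects in IDX.  */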
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
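
/* Editor's sketch of how this table is consumed -- the actual loop
   lives in linux-low.c's regset handling, so the details below are
   assumptions for illustration only.  Each entry pairs a ptrace
   request with fill/store callbacks, roughly:

     for (regset = x86_regsets; regset->fill_function != NULL; regset++)
       {
         void *buf = xmalloc (regset->size);
         // PTRACE_GETREGSET-style requests take an iovec and a note
         // type; older requests take the buffer directly.
         ptrace (regset->get_request, tid, ..., buf);
         regset->store_function (regcache, buf);  // buf -> regcache
       }

   The fill_function direction (regcache -> buf) is used on the
   corresponding set_request/store path.  */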
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors are zeroed in the
     end before detaching the forked-off process, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
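
/* Editor's illustrative sketch -- not upstream code.  Given a raw
   XSAVE buffer, the layout described above puts XCR0 at byte offset
   464, so it can be pulled out like this (guarded by #if 0, never
   compiled):  */
#if 0
static uint64_t
example_xcr0_from_xsave (const unsigned char *xsave_buf)
{
  uint64_t xcr0_mask;

  memcpy (&xcr0_mask, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	  sizeof (xcr0_mask));
  return xcr0_mask;
}
#endif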
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX_MPX_AVX512_MASK:
		  return tdesc_amd64_avx_mpx_avx512_linux;

		case X86_XSTATE_AVX_AVX512_MASK:
		  return tdesc_amd64_avx_avx512_linux;

		case X86_XSTATE_AVX_MPX_MASK:
		  return tdesc_amd64_avx_mpx_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX_MPX_AVX512_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_AVX512_MASK:
		  return tdesc_x32_avx_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX_MPX_AVX512_MASK):
	      return tdesc_i386_avx_mpx_avx512_linux;

	    case (X86_XSTATE_AVX_AVX512_MASK):
	      return tdesc_i386_avx_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MPX_MASK):
	      return tdesc_i386_avx_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
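
/* Editor's worked example (illustrative only): XCR0 is a feature
   bit-mask -- bit 0 is x87, bit 1 SSE, bit 2 AVX, bits 3-4 MPX, bits
   5-7 AVX-512.  A CPU/OS reporting xcr0 == 0x7 (x87|SSE|AVX) matches
   X86_XSTATE_AVX_MASK above, so for a 64-bit ELF inferior the switch
   selects tdesc_amd64_avx_linux.  */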
/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
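
/* Editor's example of the parse above (illustrative): a qSupported
   packet carrying "xmlRegisters=i386,arm,mips" is split by strtok
   into "i386", "arm", "mips"; the "i386" token matches, USE_XML is
   set, and the XML-based target descriptions are kept.  */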
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
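
/* Editor's usage note (illustrative): push_opcode (buf, "48 89 e6")
   parses the hex byte list, stores 0x48 0x89 0xe6 (mov %rsp,%rsi)
   into BUF, and returns 3 -- which is why the jump-pad builders below
   accumulate its result into their running index I.  */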
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

#ifdef __x86_64__
static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
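
/* Editor's worked example (illustrative, not upstream text): the
   0xe9 rel32 jump used throughout the pad builders is PC-relative --
   it transfers control to the address of the *next* instruction plus
   the signed 32-bit displacement stored after the opcode.  That is
   why every displacement above is computed as

     offset = target - (insn_address + sizeof (jump_insn));

   e.g. a pad ending at 0x1000 that must jump back to 0x2005 encodes
   0x2005 - (0x1000 + 5) = 0x1000 as its displacement, and the
   INT_MAX/INT_MIN checks reject targets outside the +/-2 GiB rel32
   range.  */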
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
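
/* Editor's note (illustrative): the 4-byte 0x66 0xe9 rel16 jump used
   above can only reach targets within +/-32 KiB of the following
   instruction, which is why a 4-byte instruction at the tracepoint
   gets a nearby trampoline (claimed from a reserved buffer) that then
   takes the full 5-byte rel32 jump to the pad itself.  */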
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  The bytes below encode
	 %rdx, which is call-clobbered, so we don't have to push/pop
	 it.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	    "push %ebp\n\t"
	    "mov %esp,%ebp\n\t"
	    "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	    "mov 12(%ebp),%ecx\n\t"
	    "mov %eax,(%ecx)\n\t"
	    "mov %ebx,0x4(%ecx)\n\t"
	    "xor %eax,%eax\n\t"
	    "pop %ebx\n\t"
	    "pop %ebp\n\t"
	    "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	    "add (%esp),%eax\n\t"
	    "adc 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	    "subl %eax,(%esp)\n\t"
	    "sbbl %ebx,4(%esp)\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	    "or %ebx,%eax\n\t"
	    "test %eax,%eax\n\t"
	    "sete %cl\n\t"
	    "xor %ebx,%ebx\n\t"
	    "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	    "and (%esp),%eax\n\t"
	    "and 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	    "or (%esp),%eax\n\t"
	    "or 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	    "xor (%esp),%eax\n\t"
	    "xor 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	    "xor $0xffffffff,%eax\n\t"
	    "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jne .Li386_equal_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "je .Li386_equal_true\n\t"
	    ".Li386_equal_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_equal_end\n\t"
	    ".Li386_equal_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_equal_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    "jne .Li386_less_signed_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    ".Li386_less_signed_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_signed_end\n\t"
	    ".Li386_less_signed_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_signed_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    "jne .Li386_less_unsigned_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    ".Li386_less_unsigned_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_unsigned_end\n\t"
	    ".Li386_less_unsigned_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_unsigned_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		"movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		"movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		"movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		"movl 4(%eax),%ebx\n\t"
		"movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	    "mov %eax,%ecx\n\t"
	    "or %ebx,%ecx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "cmpl $0,%ecx\n\t"
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	    "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	    "mov %eax,4(%esp)\n\t"
	    "mov 8(%ebp),%eax\n\t"
	    "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	    "pop %eax\n\t"
	    "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	    "push %ebx\n\t"
	    "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		"and $0xff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		"and $0xffff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		"xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	    "mov %eax,%ecx\n\t"
	    "mov %ebx,%edx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "push %edx\n\t"
	    "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	    /* Reserve a bit of stack space.  */
	    "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	    "mov %edx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	    /* Preserve %eax only; we don't have to worry about %ebx.  */
	    "push %eax\n\t"
	    /* Reserve a bit of stack space for arguments.  */
	    "sub $0x10,%esp\n\t"
	    /* Copy "top" to the second argument position.  (Note that
	       we can't assume function won't scribble on its
	       arguments, so don't try to restore from this.)  */
	    "mov %eax,4(%esp)\n\t"
	    "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	    "lea 0x10(%esp),%esp\n\t"
	    /* Restore original stack top.  */
	    "pop %eax");
}


static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
      || tdesc == tdesc_x32_linux)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_amd64_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_avx512_linux
      || tdesc == tdesc_x32_avx_avx512_linux)
    return X86_TDESC_AVX_MPX_AVX512;
  if (tdesc == tdesc_amd64_avx_avx512_linux)
    return X86_TDESC_AVX_AVX512;
#endif

  if (tdesc == tdesc_i386_mmx_linux)
    return X86_TDESC_MMX;
  if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_i386_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_i386_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_i386_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_i386_avx_mpx_avx512_linux)
    return X86_TDESC_AVX_MPX_AVX512;
  if (tdesc == tdesc_i386_avx_avx512_linux)
    return X86_TDESC_AVX_AVX512;

  /* If no tdesc matched, return the one with minimum features.  */
  return X86_TDESC_MMX;
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_mpx_linux ();
  init_registers_amd64_avx_mpx_linux ();
  init_registers_amd64_avx_avx512_linux ();
  init_registers_amd64_avx_mpx_avx512_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_mpx_linux ();
  init_registers_i386_avx_mpx_linux ();
  init_registers_i386_avx_avx512_linux ();
  init_registers_i386_avx_mpx_avx512_linux ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}