/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
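/* These constants are the sub-function codes expected by
   PTRACE_ARCH_PRCTL; e.g. ptrace (PTRACE_ARCH_PRCTL, lwpid, &base,
   ARCH_GET_FS) reads the tracee's %fs base address into BASE, as done
   in ps_get_thread_area below.  */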
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
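/* For example, GDB's i386 register 0 (%eax) is transferred from byte
   offset RAX * 8 of the 64-bit `struct user' register block, since a
   64-bit kernel only exposes 64-bit slots to ptrace.  */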
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid, (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
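/* The cannot_fetch_register/cannot_store_register callbacks below
   report a register as untransferable only when a 32-bit inferior is
   being debugged and the register number lies beyond i386_regmap.  */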
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
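/* x86_fill_gregset copies every general-purpose register from the
   register cache into BUF, which uses the `struct user' register
   block layout transferred by PTRACE_SETREGS.  */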
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
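/* Counterpart of x86_fill_gregset: copy a ptrace register block back
   into the register cache.  */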
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
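/* x86_fill_fpregset/x86_store_fpregset convert between the register
   cache and the PTRACE_GETFPREGS buffer: the fxsave layout on 64-bit
   hosts, the older fsave layout on 32-bit ones.  */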
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
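/* For example, the 8 bytes at offset 464 of an XSAVE dump hold XCR0;
   if bit 2 (0x4) is set there, the OS has enabled the AVX (%ymm)
   state component.  x86_linux_read_description below reads XCR0 from
   exactly this offset.  */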
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX_MPX_AVX512_MASK:
		case X86_XSTATE_AVX_AVX512_MASK:
		  return tdesc_amd64_avx_mpx_avx512_linux;

		case X86_XSTATE_AVX_MPX_MASK:
		  return tdesc_amd64_avx_mpx_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX_MPX_AVX512_MASK:
		case X86_XSTATE_AVX_AVX512_MASK:
		  return tdesc_x32_avx_mpx_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case X86_XSTATE_AVX_MPX_AVX512_MASK:
	    case (X86_XSTATE_AVX_AVX512_MASK):
	      return tdesc_i386_avx_mpx_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MPX_MASK):
	      return tdesc_i386_avx_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
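/* For example, an XCR0 of 0x7 (x87 | SSE | AVX) matches
   X86_XSTATE_AVX_MASK above and selects an *_avx_linux description,
   while 0x1f (adding the MPX BNDREGS/BNDCSR bits) selects
   *_avx_mpx_linux.  */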
/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this processes.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
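/* For example, push_opcode (&buf[i], "48 89 e6") appends the three
   bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) at buf[i] and returns 3.  */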
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
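/* For example, EMIT_ASM (amd64_pop, "pop %rax") lets the compiler
   assemble "pop %rax" between the start_/end_ labels, then add_insns
   copies those bytes into the compiled bytecode at current_insn_ptr.  */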
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
, int *size_p
)
2022 "cmp %rax,(%rsp)\n\t"
2023 "jne .Lamd64_eq_fallthru\n\t"
2024 "lea 0x8(%rsp),%rsp\n\t"
2026 /* jmp, but don't trust the assembler to choose the right jump */
2027 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2028 ".Lamd64_eq_fallthru:\n\t"
2029 "lea 0x8(%rsp),%rsp\n\t"
2039 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2042 "cmp %rax,(%rsp)\n\t"
2043 "je .Lamd64_ne_fallthru\n\t"
2044 "lea 0x8(%rsp),%rsp\n\t"
2046 /* jmp, but don't trust the assembler to choose the right jump */
2047 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2048 ".Lamd64_ne_fallthru:\n\t"
2049 "lea 0x8(%rsp),%rsp\n\t"
2059 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2062 "cmp %rax,(%rsp)\n\t"
2063 "jnl .Lamd64_lt_fallthru\n\t"
2064 "lea 0x8(%rsp),%rsp\n\t"
2066 /* jmp, but don't trust the assembler to choose the right jump */
2067 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2068 ".Lamd64_lt_fallthru:\n\t"
2069 "lea 0x8(%rsp),%rsp\n\t"
2079 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2082 "cmp %rax,(%rsp)\n\t"
2083 "jnle .Lamd64_le_fallthru\n\t"
2084 "lea 0x8(%rsp),%rsp\n\t"
2086 /* jmp, but don't trust the assembler to choose the right jump */
2087 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2088 ".Lamd64_le_fallthru:\n\t"
2089 "lea 0x8(%rsp),%rsp\n\t"
2099 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2102 "cmp %rax,(%rsp)\n\t"
2103 "jng .Lamd64_gt_fallthru\n\t"
2104 "lea 0x8(%rsp),%rsp\n\t"
2106 /* jmp, but don't trust the assembler to choose the right jump */
2107 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2108 ".Lamd64_gt_fallthru:\n\t"
2109 "lea 0x8(%rsp),%rsp\n\t"
2119 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2122 "cmp %rax,(%rsp)\n\t"
2123 "jnge .Lamd64_ge_fallthru\n\t"
2124 ".Lamd64_ge_jump:\n\t"
2125 "lea 0x8(%rsp),%rsp\n\t"
2127 /* jmp, but don't trust the assembler to choose the right jump */
2128 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2129 ".Lamd64_ge_fallthru:\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2139 struct emit_ops amd64_emit_ops
=
2141 amd64_emit_prologue
,
2142 amd64_emit_epilogue
,
2147 amd64_emit_rsh_signed
,
2148 amd64_emit_rsh_unsigned
,
2156 amd64_emit_less_signed
,
2157 amd64_emit_less_unsigned
,
2161 amd64_write_goto_address
,
2166 amd64_emit_stack_flush
,
2167 amd64_emit_zero_ext
,
2169 amd64_emit_stack_adjust
,
2170 amd64_emit_int_call_1
,
2171 amd64_emit_void_call_2
,
2180 #endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
      || tdesc == tdesc_x32_linux)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_amd64_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_avx512_linux
      || tdesc == tdesc_x32_avx_mpx_avx512_linux)
    return X86_TDESC_AVX_MPX_AVX512;
#endif

  if (tdesc == tdesc_i386_mmx_linux)
    return X86_TDESC_MMX;
  if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_i386_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_i386_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_i386_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_i386_avx_mpx_avx512_linux)
    return X86_TDESC_AVX_MPX_AVX512;

  /* If none of the above matched, fall back to the minimal SSE
     description index.  */
  return X86_TDESC_SSE;
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_mpx_linux ();
  init_registers_amd64_avx_mpx_linux ();
  init_registers_amd64_avx_mpx_avx512_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx_mpx_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_mpx_linux ();
  init_registers_i386_avx_mpx_linux ();
  init_registers_i386_avx_mpx_avx512_linux ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}