/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target descriptions of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,                      /* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,                       /* MPX registers BND0 ... BND3.  */
  -1, -1,                               /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                                    /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif


/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
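    /* DESC is the struct user_desc that PTRACE_GET_THREAD_AREA fills
       in (see asm/ldt.h); its second 32-bit word, desc[1], holds the
       segment's base address.  */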
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;
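
    /* For example, a %gs selector value of 0x33 yields descriptor
       index 6 (0x33 >> 3), i.e. the selector with its RPL/TI bits
       shifted out.  */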

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid,
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}



bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        collect_register_by_name (regcache, "fs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

        collect_register_by_name (regcache, "gs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
                   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
          supply_register_by_name (regcache, "fs_base", &base);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
          supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
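
/* 0xCC is the one-byte int3 instruction, the standard x86 software
   breakpoint trap.  */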
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
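
/* With the layout above, the XCR0 mask read via PTRACE_GETREGSET
   lives at 64-bit slot I386_LINUX_XSAVE_XCR0_OFFSET / 8 == 58 of the
   XSAVE buffer; see x86_linux_read_description below.  */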

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return i386_linux_read_description (X86_XSTATE_X87);
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        {
          tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
                                                !is_elf64);
        }

      if (tdesc == NULL)
        tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
        tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target descriptions of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);

          char *saveptr;
          for (char *p = strtok_r (copy, ",", &saveptr);
               p != NULL;
               p = strtok_r (NULL, ",", &saveptr))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  the_x86_target.update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
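
/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF and returns 3.  */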

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
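
/* In outline, the pad built below: pushes all general-purpose
   registers, the flags, and the tracepoint PC; carves stack space for
   a collecting_t object; spins on LOCKADDR with lock cmpxchg; calls
   COLLECTOR with the tracepoint and the saved-register block as
   arguments; releases the lock; pops everything back; executes the
   relocated original instruction; and jumps back to the program.  */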

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");       /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");    /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");       /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");       /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");       /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");       /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");       /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");          /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");                /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");             /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");       /* lock cmpxchg
                                                      %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");             /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");             /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");       /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");    /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");    /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");    /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     machines.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");       /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");          /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");    /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)                                           \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ ("jmp end_" #NAME "\n"                                    \
               "\t" "start_" #NAME ":"                                  \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":");                                  \
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)                                          \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ (".code32\n"                                              \
               "\t" "jmp end_" #NAME "\n"                               \
               "\t" "start_" #NAME ":\n"                                \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":\n"                                  \
               ".code64\n");                                            \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
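
/* For example, EMIT_ASM (amd64_add, "add (%rsp),%rax\n\t" ...) as
   used below assembles the given instructions between the local
   labels start_amd64_add and end_amd64_add inside this function's own
   text, and add_insns then copies the bytes between those labels into
   the buffer at current_insn_ptr.  */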

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
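
/* Note that DIFF above is computed relative to FROM + SIZE, i.e. to
   the end of the 4-byte displacement being patched, which is also the
   end of the jump instruction emitted by amd64_emit_goto.  */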

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
         a register, so avoid it if possible.  Use %rdx, since it is
         call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto,
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %esi\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %esi\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}


static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto,
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}