/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"

#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#include "nat/amd64-linux-siginfo.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tracepoint.h"

#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
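
/* For illustration only (not part of this file's logic): a minimal
   sketch of how these ARCH_* codes are used with PTRACE_ARCH_PRCTL to
   read a 64-bit thread's FS base.  LWPID is assumed to be an
   already-attached, stopped thread:

     unsigned long long fsbase = 0;
     if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &fsbase, ARCH_GET_FS) == 0)
       use (fsbase);      // hypothetical consumer

   The real helpers below (ps_get_thread_area, x86_get_thread_area)
   follow exactly this pattern.  */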
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
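
/* Illustrative note: each regmap entry is a byte offset into the
   ptrace general-purpose register block, so GDB register I lives at
   BUF + i386_regmap[I] (BUF is a hypothetical elf_gregset_t buffer).
   For example, on a 64-bit host the i386 "eax" value travels through
   the 8-byte RAX slot:

     collect_register (regcache, 0, (char *) buf + i386_regmap[0]);

   x86_fill_gregset and x86_store_gregset below loop over exactly these
   offsets.  */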
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
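
/* Example (illustrative): with an amd64 target description register 0
   ("rax") is 8 bytes wide, so is_64bit_tdesc () returns 1; with an
   i386 description register 0 ("eax") is 4 bytes and it returns 0.
   Code below uses this to pick between the two layouts, e.g.:

     if (is_64bit_tdesc ())
       use_amd64_layout ();   // hypothetical helper
     else
       use_i386_layout ();    // hypothetical helper
*/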
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
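
/* Worked example of the sign extension above (values illustrative):
   if a 32-bit inferior stopped with eax holding a negative value such
   as 0xfffffffe (-2), it must land in the 8-byte RAX slot as
   0xfffffffffffffffe, not 0x00000000fffffffe, or the kernel's syscall
   restart handling could misread it:

     int32_t eax = (int32_t) 0xfffffffe;   // -2
     int64_t rax = (int64_t) eax;          // sign-extends to -2

   That is all the *(int64_t *) ptr = *(int32_t *) ptr line does.  */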
400 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
405 if (register_size (regcache
->tdesc
, 0) == 8)
407 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
408 if (x86_64_regmap
[i
] != -1)
409 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
411 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
414 int lwpid
= lwpid_of (current_thread
);
416 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
417 supply_register_by_name (regcache
, "fs_base", &base
);
419 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_GS
) == 0)
420 supply_register_by_name (regcache
, "gs_base", &base
);
427 for (i
= 0; i
< I386_NUM_REGS
; i
++)
428 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
430 supply_register_by_name (regcache
, "orig_eax",
431 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
435 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
438 i387_cache_to_fxsave (regcache
, buf
);
440 i387_cache_to_fsave (regcache
, buf
);
445 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
448 i387_fxsave_to_cache (regcache
, buf
);
450 i387_fsave_to_cache (regcache
, buf
);
457 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
459 i387_cache_to_fxsave (regcache
, buf
);
463 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
465 i387_fxsave_to_cache (regcache
, buf
);
471 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
473 i387_cache_to_xsave (regcache
, buf
);
477 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
479 i387_xsave_to_cache (regcache
, buf
);
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update the supported regsets accordingly.  */
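
/* A minimal sketch of the probe the comment above suggests (assumed
   shape only; x86_linux_read_description below performs essentially
   this check the first time it runs):

     elf_fpxregset_t fpxregs;
     if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
       have_ptrace_getfpxregs = 0;   // pre-SSE kernel/CPU: drop FPX regset
     else
       have_ptrace_getfpxregs = 1;

   The regset table that follows could then be trimmed accordingly.  */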
489 static struct regset_info x86_regsets
[] =
491 #ifdef HAVE_PTRACE_GETREGS
492 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
494 x86_fill_gregset
, x86_store_gregset
},
495 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
496 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
498 # ifdef HAVE_PTRACE_GETFPXREGS
499 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
501 x86_fill_fpxregset
, x86_store_fpxregset
},
504 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
506 x86_fill_fpregset
, x86_store_fpregset
},
507 #endif /* HAVE_PTRACE_GETREGS */
512 x86_target::low_supports_breakpoints ()
518 x86_target::low_get_pc (regcache
*regcache
)
520 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
526 collect_register_by_name (regcache
, "rip", &pc
);
527 return (CORE_ADDR
) pc
;
533 collect_register_by_name (regcache
, "eip", &pc
);
534 return (CORE_ADDR
) pc
;
539 x86_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
541 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
547 supply_register_by_name (regcache
, "rip", &newpc
);
553 supply_register_by_name (regcache
, "eip", &newpc
);
558 x86_target::low_decr_pc_after_break ()
564 static const gdb_byte x86_breakpoint
[] = { 0xCC };
565 #define x86_breakpoint_len 1
568 x86_target::low_breakpoint_at (CORE_ADDR pc
)
572 read_memory (pc
, &c
, 1);
579 /* Low-level function vector. */
580 struct x86_dr_low_type x86_dr_low
=
582 x86_linux_dr_set_control
,
583 x86_linux_dr_set_addr
,
584 x86_linux_dr_get_addr
,
585 x86_linux_dr_get_status
,
586 x86_linux_dr_get_control
,
590 /* Breakpoint/Watchpoint support. */
593 x86_target::supports_z_point_type (char z_type
)
599 case Z_PACKET_WRITE_WP
:
600 case Z_PACKET_ACCESS_WP
:
608 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
609 int size
, struct raw_breakpoint
*bp
)
611 struct process_info
*proc
= current_process ();
615 case raw_bkpt_type_hw
:
616 case raw_bkpt_type_write_wp
:
617 case raw_bkpt_type_access_wp
:
619 enum target_hw_bp_type hw_type
620 = raw_bkpt_type_to_target_hw_bp_type (type
);
621 struct x86_debug_reg_state
*state
622 = &proc
->priv
->arch_private
->debug_reg_state
;
624 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
634 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
635 int size
, struct raw_breakpoint
*bp
)
637 struct process_info
*proc
= current_process ();
641 case raw_bkpt_type_hw
:
642 case raw_bkpt_type_write_wp
:
643 case raw_bkpt_type_access_wp
:
645 enum target_hw_bp_type hw_type
646 = raw_bkpt_type_to_target_hw_bp_type (type
);
647 struct x86_debug_reg_state
*state
648 = &proc
->priv
->arch_private
->debug_reg_state
;
650 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
659 x86_stopped_by_watchpoint (void)
661 struct process_info
*proc
= current_process ();
662 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
666 x86_stopped_data_address (void)
668 struct process_info
*proc
= current_process ();
670 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
676 /* Called when a new process is created. */
678 static struct arch_process_info
*
679 x86_linux_new_process (void)
681 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
683 x86_low_init_dregs (&info
->debug_reg_state
);
688 /* Called when a process is being deleted. */
691 x86_linux_delete_process (struct arch_process_info
*info
)
696 /* Target routine for linux_new_fork. */
699 x86_linux_new_fork (struct process_info
*parent
, struct process_info
*child
)
701 /* These are allocated by linux_add_process. */
702 gdb_assert (parent
->priv
!= NULL
703 && parent
->priv
->arch_private
!= NULL
);
704 gdb_assert (child
->priv
!= NULL
705 && child
->priv
->arch_private
!= NULL
);
707 /* Linux kernel before 2.6.33 commit
708 72f674d203cd230426437cdcf7dd6f681dad8b0d
709 will inherit hardware debug registers from parent
710 on fork/vfork/clone. Newer Linux kernels create such tasks with
711 zeroed debug registers.
713 GDB core assumes the child inherits the watchpoints/hw
714 breakpoints of the parent, and will remove them all from the
715 forked off process. Copy the debug registers mirrors into the
716 new process so that all breakpoints and watchpoints can be
717 removed together. The debug registers mirror will become zeroed
718 in the end before detaching the forked off process, thus making
719 this compatible with older Linux kernels too. */
721 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
724 /* See nat/x86-dregs.h. */
726 struct x86_debug_reg_state
*
727 x86_debug_reg_state (pid_t pid
)
729 struct process_info
*proc
= find_process_pid (pid
);
731 return &proc
->priv
->arch_private
->debug_reg_state
;
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */
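
/* Illustrative call shapes (hypothetical buffers, shown only to make
   the DIRECTION convention concrete):

     x86_siginfo_fixup (&native_siginfo, inf_buf, 0);  // ptrace -> inferior
     x86_siginfo_fixup (&native_siginfo, inf_buf, 1);  // inferior -> ptrace

   Either call returns 1 only when a 32-bit <-> 64-bit layout
   conversion was actually performed.  */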
747 x86_siginfo_fixup (siginfo_t
*ptrace
, gdb_byte
*inf
, int direction
)
750 unsigned int machine
;
751 int tid
= lwpid_of (current_thread
);
752 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
754 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
755 if (!is_64bit_tdesc ())
756 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
758 /* No fixup for native x32 GDB. */
759 else if (!is_elf64
&& sizeof (void *) == 8)
760 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc.
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
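
/* For illustration: given a raw XSAVE buffer fetched with
   PTRACE_GETREGSET / NT_X86_XSTATE into a uint64_t array (XSTATEREGS
   below is hypothetical), the enabled-feature mask is read straight
   from that offset:

     uint64_t xcr0
       = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   x86_linux_read_description () below does exactly this to decide which
   target description (SSE, AVX, AVX512, ...) to report.  */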
790 /* Does the current host support the GETFPXREGS request? The header
791 file may or may not define it, and even if it is defined, the
792 kernel will return EIO if it's running on a pre-SSE processor. */
793 int have_ptrace_getfpxregs
=
794 #ifdef HAVE_PTRACE_GETFPXREGS
801 /* Get Linux/x86 target description from running target. */
803 static const struct target_desc
*
804 x86_linux_read_description (void)
806 unsigned int machine
;
810 static uint64_t xcr0
;
811 struct regset_info
*regset
;
813 tid
= lwpid_of (current_thread
);
815 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
817 if (sizeof (void *) == 4)
820 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
822 else if (machine
== EM_X86_64
)
823 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
827 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
828 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
830 elf_fpxregset_t fpxregs
;
832 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
834 have_ptrace_getfpxregs
= 0;
835 have_ptrace_getregset
= 0;
836 return i386_linux_read_description (X86_XSTATE_X87
);
839 have_ptrace_getfpxregs
= 1;
845 x86_xcr0
= X86_XSTATE_SSE_MASK
;
849 if (machine
== EM_X86_64
)
850 return tdesc_amd64_linux_no_xml
;
853 return tdesc_i386_linux_no_xml
;
856 if (have_ptrace_getregset
== -1)
858 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
861 iov
.iov_base
= xstateregs
;
862 iov
.iov_len
= sizeof (xstateregs
);
864 /* Check if PTRACE_GETREGSET works. */
865 if (ptrace (PTRACE_GETREGSET
, tid
,
866 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
867 have_ptrace_getregset
= 0;
870 have_ptrace_getregset
= 1;
872 /* Get XCR0 from XSAVE extended state. */
873 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
874 / sizeof (uint64_t))];
876 /* Use PTRACE_GETREGSET if it is available. */
877 for (regset
= x86_regsets
;
878 regset
->fill_function
!= NULL
; regset
++)
879 if (regset
->get_request
== PTRACE_GETREGSET
)
880 regset
->size
= X86_XSTATE_SIZE (xcr0
);
881 else if (regset
->type
!= GENERAL_REGS
)
886 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
887 xcr0_features
= (have_ptrace_getregset
888 && (xcr0
& X86_XSTATE_ALL_MASK
));
893 if (machine
== EM_X86_64
)
896 const target_desc
*tdesc
= NULL
;
900 tdesc
= amd64_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
,
905 tdesc
= amd64_linux_read_description (X86_XSTATE_SSE_MASK
, !is_elf64
);
911 const target_desc
*tdesc
= NULL
;
914 tdesc
= i386_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
);
917 tdesc
= i386_linux_read_description (X86_XSTATE_SSE
);
922 gdb_assert_not_reached ("failed to return tdesc");
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */
929 x86_target::update_xmltarget ()
931 struct thread_info
*saved_thread
= current_thread
;
933 /* Before changing the register cache's internal layout, flush the
934 contents of the current valid caches back to the threads, and
935 release the current regcache objects. */
938 for_each_process ([this] (process_info
*proc
) {
941 /* Look up any thread of this process. */
942 current_thread
= find_any_thread_of_pid (pid
);
947 current_thread
= saved_thread
;
950 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
954 x86_linux_process_qsupported (char **features
, int count
)
958 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
959 with "i386" in qSupported query, it supports x86 XML target
962 for (i
= 0; i
< count
; i
++)
964 const char *feature
= features
[i
];
966 if (startswith (feature
, "xmlRegisters="))
968 char *copy
= xstrdup (feature
+ 13);
971 for (char *p
= strtok_r (copy
, ",", &saveptr
);
973 p
= strtok_r (NULL
, ",", &saveptr
))
975 if (strcmp (p
, "i386") == 0)
985 the_x86_target
.update_xmltarget ();
988 /* Common for x86/x86-64. */
990 static struct regsets_info x86_regsets_info
=
992 x86_regsets
, /* regsets */
994 NULL
, /* disabled_regsets */
998 static struct regs_info amd64_linux_regs_info
=
1000 NULL
, /* regset_bitmap */
1001 NULL
, /* usrregs_info */
1005 static struct usrregs_info i386_linux_usrregs_info
=
1011 static struct regs_info i386_linux_regs_info
=
1013 NULL
, /* regset_bitmap */
1014 &i386_linux_usrregs_info
,
1019 x86_target::get_regs_info ()
1022 if (is_64bit_tdesc ())
1023 return &amd64_linux_regs_info
;
1026 return &i386_linux_regs_info
;
1029 /* Initialize the target description for the architecture of the
1033 x86_target::low_arch_setup ()
1035 current_process ()->tdesc
= x86_linux_read_description ();
1038 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1039 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1042 x86_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
1044 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
1050 collect_register_by_name (regcache
, "orig_rax", &l_sysno
);
1051 *sysno
= (int) l_sysno
;
1054 collect_register_by_name (regcache
, "orig_eax", sysno
);
1058 x86_supports_tracepoints (void)
1064 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1066 target_write_memory (*to
, buf
, len
);
1071 push_opcode (unsigned char *buf
, const char *op
)
1073 unsigned char *buf_org
= buf
;
1078 unsigned long ul
= strtoul (op
, &endptr
, 16);
1087 return buf
- buf_org
;
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
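
/* Rough shape of the pad this routine emits (a summary of the steps
   below, not extra code):

     push all GP registers, eflags and the tracepoint PC
     build a collecting_t object on the stack
     spin on "lock cmpxchg" to take the collector lock
     call gdb_collect (tpoint, saved-register block)
     release the lock and pop the saved registers
     <relocated original instruction>
     jmp back to tpaddr + orig_size

   The 5-byte jump_insn (0xe9 + rel32) defined near the top of this
   file is what finally gets patched over the original instruction.  */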
1098 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1099 CORE_ADDR collector
,
1102 CORE_ADDR
*jump_entry
,
1103 CORE_ADDR
*trampoline
,
1104 ULONGEST
*trampoline_size
,
1105 unsigned char *jjump_pad_insn
,
1106 ULONGEST
*jjump_pad_insn_size
,
1107 CORE_ADDR
*adjusted_insn_addr
,
1108 CORE_ADDR
*adjusted_insn_addr_end
,
1111 unsigned char buf
[40];
1115 CORE_ADDR buildaddr
= *jump_entry
;
1117 /* Build the jump pad. */
1119 /* First, do tracepoint data collection. Save registers. */
1121 /* Need to ensure stack pointer saved first. */
1122 buf
[i
++] = 0x54; /* push %rsp */
1123 buf
[i
++] = 0x55; /* push %rbp */
1124 buf
[i
++] = 0x57; /* push %rdi */
1125 buf
[i
++] = 0x56; /* push %rsi */
1126 buf
[i
++] = 0x52; /* push %rdx */
1127 buf
[i
++] = 0x51; /* push %rcx */
1128 buf
[i
++] = 0x53; /* push %rbx */
1129 buf
[i
++] = 0x50; /* push %rax */
1130 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1131 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1132 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1133 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1134 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1135 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1136 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1137 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1138 buf
[i
++] = 0x9c; /* pushfq */
1139 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1141 memcpy (buf
+ i
, &tpaddr
, 8);
1143 buf
[i
++] = 0x57; /* push %rdi */
1144 append_insns (&buildaddr
, i
, buf
);
1146 /* Stack space for the collecting_t object. */
1148 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1149 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1150 memcpy (buf
+ i
, &tpoint
, 8);
1152 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1153 i
+= push_opcode (&buf
[i
],
1154 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1155 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1156 append_insns (&buildaddr
, i
, buf
);
1160 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1161 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1163 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1164 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1165 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1166 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1167 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1168 append_insns (&buildaddr
, i
, buf
);
1170 /* Set up the gdb_collect call. */
1171 /* At this point, (stack pointer + 0x18) is the base of our saved
1175 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1176 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1178 /* tpoint address may be 64-bit wide. */
1179 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1180 memcpy (buf
+ i
, &tpoint
, 8);
1182 append_insns (&buildaddr
, i
, buf
);
  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
1187 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1188 memcpy (buf
+ i
, &collector
, 8);
1190 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1191 append_insns (&buildaddr
, i
, buf
);
1193 /* Clear the spin-lock. */
1195 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1196 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1197 memcpy (buf
+ i
, &lockaddr
, 8);
1199 append_insns (&buildaddr
, i
, buf
);
1201 /* Remove stack that had been used for the collect_t object. */
1203 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1204 append_insns (&buildaddr
, i
, buf
);
1206 /* Restore register state. */
1208 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1212 buf
[i
++] = 0x9d; /* popfq */
1213 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1214 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1215 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1216 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1217 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1218 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1219 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1220 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1221 buf
[i
++] = 0x58; /* pop %rax */
1222 buf
[i
++] = 0x5b; /* pop %rbx */
1223 buf
[i
++] = 0x59; /* pop %rcx */
1224 buf
[i
++] = 0x5a; /* pop %rdx */
1225 buf
[i
++] = 0x5e; /* pop %rsi */
1226 buf
[i
++] = 0x5f; /* pop %rdi */
1227 buf
[i
++] = 0x5d; /* pop %rbp */
1228 buf
[i
++] = 0x5c; /* pop %rsp */
1229 append_insns (&buildaddr
, i
, buf
);
1231 /* Now, adjust the original instruction to execute in the jump
1233 *adjusted_insn_addr
= buildaddr
;
1234 relocate_instruction (&buildaddr
, tpaddr
);
1235 *adjusted_insn_addr_end
= buildaddr
;
1237 /* Finally, write a jump back to the program. */
1239 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1240 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1243 "E.Jump back from jump pad too far from tracepoint "
1244 "(offset 0x%" PRIx64
" > int32).", loffset
);
1248 offset
= (int) loffset
;
1249 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1250 memcpy (buf
+ 1, &offset
, 4);
1251 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1253 /* The jump pad is now built. Wire in a jump to our jump pad. This
1254 is always done last (by our caller actually), so that we can
1255 install fast tracepoints with threads running. This relies on
1256 the agent's atomic write support. */
1257 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1258 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1261 "E.Jump pad too far from tracepoint "
1262 "(offset 0x%" PRIx64
" > int32).", loffset
);
1266 offset
= (int) loffset
;
1268 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1269 memcpy (buf
+ 1, &offset
, 4);
1270 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1271 *jjump_pad_insn_size
= sizeof (jump_insn
);
1273 /* Return the end address of our pad. */
1274 *jump_entry
= buildaddr
;
1279 #endif /* __x86_64__ */
1281 /* Build a jump pad that saves registers and calls a collection
1282 function. Writes a jump instruction to the jump pad to
1283 JJUMPAD_INSN. The caller is responsible to write it in at the
1284 tracepoint address. */
1287 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1288 CORE_ADDR collector
,
1291 CORE_ADDR
*jump_entry
,
1292 CORE_ADDR
*trampoline
,
1293 ULONGEST
*trampoline_size
,
1294 unsigned char *jjump_pad_insn
,
1295 ULONGEST
*jjump_pad_insn_size
,
1296 CORE_ADDR
*adjusted_insn_addr
,
1297 CORE_ADDR
*adjusted_insn_addr_end
,
1300 unsigned char buf
[0x100];
1302 CORE_ADDR buildaddr
= *jump_entry
;
1304 /* Build the jump pad. */
1306 /* First, do tracepoint data collection. Save registers. */
1308 buf
[i
++] = 0x60; /* pushad */
1309 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1310 *((int *)(buf
+ i
)) = (int) tpaddr
;
1312 buf
[i
++] = 0x9c; /* pushf */
1313 buf
[i
++] = 0x1e; /* push %ds */
1314 buf
[i
++] = 0x06; /* push %es */
1315 buf
[i
++] = 0x0f; /* push %fs */
1317 buf
[i
++] = 0x0f; /* push %gs */
1319 buf
[i
++] = 0x16; /* push %ss */
1320 buf
[i
++] = 0x0e; /* push %cs */
1321 append_insns (&buildaddr
, i
, buf
);
1323 /* Stack space for the collecting_t object. */
1325 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1327 /* Build the object. */
1328 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1329 memcpy (buf
+ i
, &tpoint
, 4);
1331 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1333 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1334 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1335 append_insns (&buildaddr
, i
, buf
);
1337 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1338 If we cared for it, this could be using xchg alternatively. */
1341 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1342 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1344 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1346 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1347 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1348 append_insns (&buildaddr
, i
, buf
);
1351 /* Set up arguments to the gdb_collect call. */
1353 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1354 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1355 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1356 append_insns (&buildaddr
, i
, buf
);
1359 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1360 append_insns (&buildaddr
, i
, buf
);
1363 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1364 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1366 append_insns (&buildaddr
, i
, buf
);
1368 buf
[0] = 0xe8; /* call <reladdr> */
1369 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1370 memcpy (buf
+ 1, &offset
, 4);
1371 append_insns (&buildaddr
, 5, buf
);
1372 /* Clean up after the call. */
1373 buf
[0] = 0x83; /* add $0x8,%esp */
1376 append_insns (&buildaddr
, 3, buf
);
1379 /* Clear the spin-lock. This would need the LOCK prefix on older
1382 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1383 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1384 memcpy (buf
+ i
, &lockaddr
, 4);
1386 append_insns (&buildaddr
, i
, buf
);
1389 /* Remove stack that had been used for the collect_t object. */
1391 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1392 append_insns (&buildaddr
, i
, buf
);
1395 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1398 buf
[i
++] = 0x17; /* pop %ss */
1399 buf
[i
++] = 0x0f; /* pop %gs */
1401 buf
[i
++] = 0x0f; /* pop %fs */
1403 buf
[i
++] = 0x07; /* pop %es */
1404 buf
[i
++] = 0x1f; /* pop %ds */
1405 buf
[i
++] = 0x9d; /* popf */
1406 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1409 buf
[i
++] = 0x61; /* popad */
1410 append_insns (&buildaddr
, i
, buf
);
1412 /* Now, adjust the original instruction to execute in the jump
1414 *adjusted_insn_addr
= buildaddr
;
1415 relocate_instruction (&buildaddr
, tpaddr
);
1416 *adjusted_insn_addr_end
= buildaddr
;
1418 /* Write the jump back to the program. */
1419 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1420 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1421 memcpy (buf
+ 1, &offset
, 4);
1422 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1424 /* The jump pad is now built. Wire in a jump to our jump pad. This
1425 is always done last (by our caller actually), so that we can
1426 install fast tracepoints with threads running. This relies on
1427 the agent's atomic write support. */
1430 /* Create a trampoline. */
1431 *trampoline_size
= sizeof (jump_insn
);
1432 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1434 /* No trampoline space available. */
1436 "E.Cannot allocate trampoline space needed for fast "
1437 "tracepoints on 4-byte instructions.");
1441 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1442 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1443 memcpy (buf
+ 1, &offset
, 4);
1444 target_write_memory (*trampoline
, buf
, sizeof (jump_insn
));
1446 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1447 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1448 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1449 memcpy (buf
+ 2, &offset
, 2);
1450 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1451 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1455 /* Else use a 32-bit relative jump instruction. */
1456 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1457 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1458 memcpy (buf
+ 1, &offset
, 4);
1459 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1460 *jjump_pad_insn_size
= sizeof (jump_insn
);
1463 /* Return the end address of our pad. */
1464 *jump_entry
= buildaddr
;
1470 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1471 CORE_ADDR collector
,
1474 CORE_ADDR
*jump_entry
,
1475 CORE_ADDR
*trampoline
,
1476 ULONGEST
*trampoline_size
,
1477 unsigned char *jjump_pad_insn
,
1478 ULONGEST
*jjump_pad_insn_size
,
1479 CORE_ADDR
*adjusted_insn_addr
,
1480 CORE_ADDR
*adjusted_insn_addr_end
,
1484 if (is_64bit_tdesc ())
1485 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1486 collector
, lockaddr
,
1487 orig_size
, jump_entry
,
1488 trampoline
, trampoline_size
,
1490 jjump_pad_insn_size
,
1492 adjusted_insn_addr_end
,
1496 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1497 collector
, lockaddr
,
1498 orig_size
, jump_entry
,
1499 trampoline
, trampoline_size
,
1501 jjump_pad_insn_size
,
1503 adjusted_insn_addr_end
,
1507 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1511 x86_get_min_fast_tracepoint_insn_len (void)
1513 static int warned_about_fast_tracepoints
= 0;
1516 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1517 used for fast tracepoints. */
1518 if (is_64bit_tdesc ())
1522 if (agent_loaded_p ())
1524 char errbuf
[IPA_BUFSIZ
];
1528 /* On x86, if trampolines are available, then 4-byte jump instructions
1529 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1530 with a 4-byte offset are used instead. */
1531 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1535 /* GDB has no channel to explain to user why a shorter fast
1536 tracepoint is not possible, but at least make GDBserver
1537 mention that something has gone awry. */
1538 if (!warned_about_fast_tracepoints
)
1540 warning ("4-byte fast tracepoints not available; %s", errbuf
);
1541 warned_about_fast_tracepoints
= 1;
1548 /* Indicate that the minimum length is currently unknown since the IPA
1549 has not loaded yet. */
1555 add_insns (unsigned char *start
, int len
)
1557 CORE_ADDR buildaddr
= current_insn_ptr
;
1560 debug_printf ("Adding %d bytes of insn at %s\n",
1561 len
, paddress (buildaddr
));
1563 append_insns (&buildaddr
, len
, start
);
1564 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
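
/* Usage sketch (illustrative; mirrors the emitters that follow): the
   macro plants the instructions in the gdbserver binary itself between
   two local labels, and add_insns () then copies those bytes into the
   bytecode buffer at current_insn_ptr.  For example,

     EMIT_ASM (amd64_example_pop,   // hypothetical NAME
	       "pop %rax");

   appends the single "pop %rax" instruction to the code being built,
   without hand-encoding its opcode.  */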
1608 amd64_emit_prologue (void)
1610 EMIT_ASM (amd64_prologue
,
1612 "movq %rsp,%rbp\n\t"
1613 "sub $0x20,%rsp\n\t"
1614 "movq %rdi,-8(%rbp)\n\t"
1615 "movq %rsi,-16(%rbp)");
1620 amd64_emit_epilogue (void)
1622 EMIT_ASM (amd64_epilogue
,
1623 "movq -16(%rbp),%rdi\n\t"
1624 "movq %rax,(%rdi)\n\t"
1631 amd64_emit_add (void)
1633 EMIT_ASM (amd64_add
,
1634 "add (%rsp),%rax\n\t"
1635 "lea 0x8(%rsp),%rsp");
1639 amd64_emit_sub (void)
1641 EMIT_ASM (amd64_sub
,
1642 "sub %rax,(%rsp)\n\t"
1647 amd64_emit_mul (void)
1653 amd64_emit_lsh (void)
1659 amd64_emit_rsh_signed (void)
1665 amd64_emit_rsh_unsigned (void)
1671 amd64_emit_ext (int arg
)
1676 EMIT_ASM (amd64_ext_8
,
1682 EMIT_ASM (amd64_ext_16
,
1687 EMIT_ASM (amd64_ext_32
,
1696 amd64_emit_log_not (void)
1698 EMIT_ASM (amd64_log_not
,
1699 "test %rax,%rax\n\t"
1705 amd64_emit_bit_and (void)
1707 EMIT_ASM (amd64_and
,
1708 "and (%rsp),%rax\n\t"
1709 "lea 0x8(%rsp),%rsp");
1713 amd64_emit_bit_or (void)
1716 "or (%rsp),%rax\n\t"
1717 "lea 0x8(%rsp),%rsp");
1721 amd64_emit_bit_xor (void)
1723 EMIT_ASM (amd64_xor
,
1724 "xor (%rsp),%rax\n\t"
1725 "lea 0x8(%rsp),%rsp");
1729 amd64_emit_bit_not (void)
1731 EMIT_ASM (amd64_bit_not
,
1732 "xorq $0xffffffffffffffff,%rax");
1736 amd64_emit_equal (void)
1738 EMIT_ASM (amd64_equal
,
1739 "cmp %rax,(%rsp)\n\t"
1740 "je .Lamd64_equal_true\n\t"
1742 "jmp .Lamd64_equal_end\n\t"
1743 ".Lamd64_equal_true:\n\t"
1745 ".Lamd64_equal_end:\n\t"
1746 "lea 0x8(%rsp),%rsp");
1750 amd64_emit_less_signed (void)
1752 EMIT_ASM (amd64_less_signed
,
1753 "cmp %rax,(%rsp)\n\t"
1754 "jl .Lamd64_less_signed_true\n\t"
1756 "jmp .Lamd64_less_signed_end\n\t"
1757 ".Lamd64_less_signed_true:\n\t"
1759 ".Lamd64_less_signed_end:\n\t"
1760 "lea 0x8(%rsp),%rsp");
1764 amd64_emit_less_unsigned (void)
1766 EMIT_ASM (amd64_less_unsigned
,
1767 "cmp %rax,(%rsp)\n\t"
1768 "jb .Lamd64_less_unsigned_true\n\t"
1770 "jmp .Lamd64_less_unsigned_end\n\t"
1771 ".Lamd64_less_unsigned_true:\n\t"
1773 ".Lamd64_less_unsigned_end:\n\t"
1774 "lea 0x8(%rsp),%rsp");
1778 amd64_emit_ref (int size
)
1783 EMIT_ASM (amd64_ref1
,
1787 EMIT_ASM (amd64_ref2
,
1791 EMIT_ASM (amd64_ref4
,
1792 "movl (%rax),%eax");
1795 EMIT_ASM (amd64_ref8
,
1796 "movq (%rax),%rax");
1802 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1804 EMIT_ASM (amd64_if_goto
,
1808 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1816 amd64_emit_goto (int *offset_p
, int *size_p
)
1818 EMIT_ASM (amd64_goto
,
1819 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1827 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1829 int diff
= (to
- (from
+ size
));
1830 unsigned char buf
[sizeof (int)];
1838 memcpy (buf
, &diff
, sizeof (int));
1839 target_write_memory (from
, buf
, sizeof (int));
1843 amd64_emit_const (LONGEST num
)
1845 unsigned char buf
[16];
1847 CORE_ADDR buildaddr
= current_insn_ptr
;
1850 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1851 memcpy (&buf
[i
], &num
, sizeof (num
));
1853 append_insns (&buildaddr
, i
, buf
);
1854 current_insn_ptr
= buildaddr
;
1858 amd64_emit_call (CORE_ADDR fn
)
1860 unsigned char buf
[16];
1862 CORE_ADDR buildaddr
;
  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */
1868 buildaddr
= current_insn_ptr
;
1870 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1874 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1876 /* Offset is too large for a call. Use callq, but that requires
1877 a register, so avoid it if possible. Use r10, since it is
1878 call-clobbered, we don't have to push/pop it. */
1879 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1881 memcpy (buf
+ i
, &fn
, 8);
1883 buf
[i
++] = 0xff; /* callq *%r10 */
1888 int offset32
= offset64
; /* we know we can't overflow here. */
1890 buf
[i
++] = 0xe8; /* call <reladdr> */
1891 memcpy (buf
+ i
, &offset32
, 4);
1895 append_insns (&buildaddr
, i
, buf
);
1896 current_insn_ptr
= buildaddr
;
1900 amd64_emit_reg (int reg
)
1902 unsigned char buf
[16];
1904 CORE_ADDR buildaddr
;
1906 /* Assume raw_regs is still in %rdi. */
1907 buildaddr
= current_insn_ptr
;
1909 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1910 memcpy (&buf
[i
], ®
, sizeof (reg
));
1912 append_insns (&buildaddr
, i
, buf
);
1913 current_insn_ptr
= buildaddr
;
1914 amd64_emit_call (get_raw_reg_func_addr ());
1918 amd64_emit_pop (void)
1920 EMIT_ASM (amd64_pop
,
1925 amd64_emit_stack_flush (void)
1927 EMIT_ASM (amd64_stack_flush
,
1932 amd64_emit_zero_ext (int arg
)
1937 EMIT_ASM (amd64_zero_ext_8
,
1941 EMIT_ASM (amd64_zero_ext_16
,
1942 "and $0xffff,%rax");
1945 EMIT_ASM (amd64_zero_ext_32
,
1946 "mov $0xffffffff,%rcx\n\t"
1955 amd64_emit_swap (void)
1957 EMIT_ASM (amd64_swap
,
1964 amd64_emit_stack_adjust (int n
)
1966 unsigned char buf
[16];
1968 CORE_ADDR buildaddr
= current_insn_ptr
;
1971 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1975 /* This only handles adjustments up to 16, but we don't expect any more. */
1977 append_insns (&buildaddr
, i
, buf
);
1978 current_insn_ptr
= buildaddr
;
1981 /* FN's prototype is `LONGEST(*fn)(int)'. */
1984 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1986 unsigned char buf
[16];
1988 CORE_ADDR buildaddr
;
1990 buildaddr
= current_insn_ptr
;
1992 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1993 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1995 append_insns (&buildaddr
, i
, buf
);
1996 current_insn_ptr
= buildaddr
;
1997 amd64_emit_call (fn
);
2000 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2003 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2005 unsigned char buf
[16];
2007 CORE_ADDR buildaddr
;
2009 buildaddr
= current_insn_ptr
;
2011 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2012 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2014 append_insns (&buildaddr
, i
, buf
);
2015 current_insn_ptr
= buildaddr
;
2016 EMIT_ASM (amd64_void_call_2_a
,
2017 /* Save away a copy of the stack top. */
2019 /* Also pass top as the second argument. */
2021 amd64_emit_call (fn
);
2022 EMIT_ASM (amd64_void_call_2_b
,
2023 /* Restore the stack top, %rax may have been trashed. */
2028 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2031 "cmp %rax,(%rsp)\n\t"
2032 "jne .Lamd64_eq_fallthru\n\t"
2033 "lea 0x8(%rsp),%rsp\n\t"
2035 /* jmp, but don't trust the assembler to choose the right jump */
2036 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2037 ".Lamd64_eq_fallthru:\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2048 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2051 "cmp %rax,(%rsp)\n\t"
2052 "je .Lamd64_ne_fallthru\n\t"
2053 "lea 0x8(%rsp),%rsp\n\t"
2055 /* jmp, but don't trust the assembler to choose the right jump */
2056 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2057 ".Lamd64_ne_fallthru:\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2068 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2071 "cmp %rax,(%rsp)\n\t"
2072 "jnl .Lamd64_lt_fallthru\n\t"
2073 "lea 0x8(%rsp),%rsp\n\t"
2075 /* jmp, but don't trust the assembler to choose the right jump */
2076 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2077 ".Lamd64_lt_fallthru:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2088 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2091 "cmp %rax,(%rsp)\n\t"
2092 "jnle .Lamd64_le_fallthru\n\t"
2093 "lea 0x8(%rsp),%rsp\n\t"
2095 /* jmp, but don't trust the assembler to choose the right jump */
2096 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2097 ".Lamd64_le_fallthru:\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2108 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2111 "cmp %rax,(%rsp)\n\t"
2112 "jng .Lamd64_gt_fallthru\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2115 /* jmp, but don't trust the assembler to choose the right jump */
2116 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2117 ".Lamd64_gt_fallthru:\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2128 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2131 "cmp %rax,(%rsp)\n\t"
2132 "jnge .Lamd64_ge_fallthru\n\t"
2133 ".Lamd64_ge_jump:\n\t"
2134 "lea 0x8(%rsp),%rsp\n\t"
2136 /* jmp, but don't trust the assembler to choose the right jump */
2137 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2138 ".Lamd64_ge_fallthru:\n\t"
2139 "lea 0x8(%rsp),%rsp\n\t"
2148 struct emit_ops amd64_emit_ops
=
2150 amd64_emit_prologue
,
2151 amd64_emit_epilogue
,
2156 amd64_emit_rsh_signed
,
2157 amd64_emit_rsh_unsigned
,
2165 amd64_emit_less_signed
,
2166 amd64_emit_less_unsigned
,
2170 amd64_write_goto_address
,
2175 amd64_emit_stack_flush
,
2176 amd64_emit_zero_ext
,
2178 amd64_emit_stack_adjust
,
2179 amd64_emit_int_call_1
,
2180 amd64_emit_void_call_2
,
2189 #endif /* __x86_64__ */
2192 i386_emit_prologue (void)
2194 EMIT_ASM32 (i386_prologue
,
2198 /* At this point, the raw regs base address is at 8(%ebp), and the
2199 value pointer is at 12(%ebp). */
2203 i386_emit_epilogue (void)
2205 EMIT_ASM32 (i386_epilogue
,
2206 "mov 12(%ebp),%ecx\n\t"
2207 "mov %eax,(%ecx)\n\t"
2208 "mov %ebx,0x4(%ecx)\n\t"
2216 i386_emit_add (void)
2218 EMIT_ASM32 (i386_add
,
2219 "add (%esp),%eax\n\t"
2220 "adc 0x4(%esp),%ebx\n\t"
2221 "lea 0x8(%esp),%esp");
2225 i386_emit_sub (void)
2227 EMIT_ASM32 (i386_sub
,
2228 "subl %eax,(%esp)\n\t"
2229 "sbbl %ebx,4(%esp)\n\t"
2235 i386_emit_mul (void)
2241 i386_emit_lsh (void)
2247 i386_emit_rsh_signed (void)
2253 i386_emit_rsh_unsigned (void)
2259 i386_emit_ext (int arg
)
2264 EMIT_ASM32 (i386_ext_8
,
2267 "movl %eax,%ebx\n\t"
2271 EMIT_ASM32 (i386_ext_16
,
2273 "movl %eax,%ebx\n\t"
2277 EMIT_ASM32 (i386_ext_32
,
2278 "movl %eax,%ebx\n\t"
2287 i386_emit_log_not (void)
2289 EMIT_ASM32 (i386_log_not
,
2291 "test %eax,%eax\n\t"
2298 i386_emit_bit_and (void)
2300 EMIT_ASM32 (i386_and
,
2301 "and (%esp),%eax\n\t"
2302 "and 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2307 i386_emit_bit_or (void)
2309 EMIT_ASM32 (i386_or
,
2310 "or (%esp),%eax\n\t"
2311 "or 0x4(%esp),%ebx\n\t"
2312 "lea 0x8(%esp),%esp");
2316 i386_emit_bit_xor (void)
2318 EMIT_ASM32 (i386_xor
,
2319 "xor (%esp),%eax\n\t"
2320 "xor 0x4(%esp),%ebx\n\t"
2321 "lea 0x8(%esp),%esp");
2325 i386_emit_bit_not (void)
2327 EMIT_ASM32 (i386_bit_not
,
2328 "xor $0xffffffff,%eax\n\t"
2329 "xor $0xffffffff,%ebx\n\t");
2333 i386_emit_equal (void)
2335 EMIT_ASM32 (i386_equal
,
2336 "cmpl %ebx,4(%esp)\n\t"
2337 "jne .Li386_equal_false\n\t"
2338 "cmpl %eax,(%esp)\n\t"
2339 "je .Li386_equal_true\n\t"
2340 ".Li386_equal_false:\n\t"
2342 "jmp .Li386_equal_end\n\t"
2343 ".Li386_equal_true:\n\t"
2345 ".Li386_equal_end:\n\t"
2347 "lea 0x8(%esp),%esp");
2351 i386_emit_less_signed (void)
2353 EMIT_ASM32 (i386_less_signed
,
2354 "cmpl %ebx,4(%esp)\n\t"
2355 "jl .Li386_less_signed_true\n\t"
2356 "jne .Li386_less_signed_false\n\t"
2357 "cmpl %eax,(%esp)\n\t"
2358 "jl .Li386_less_signed_true\n\t"
2359 ".Li386_less_signed_false:\n\t"
2361 "jmp .Li386_less_signed_end\n\t"
2362 ".Li386_less_signed_true:\n\t"
2364 ".Li386_less_signed_end:\n\t"
2366 "lea 0x8(%esp),%esp");
2370 i386_emit_less_unsigned (void)
2372 EMIT_ASM32 (i386_less_unsigned
,
2373 "cmpl %ebx,4(%esp)\n\t"
2374 "jb .Li386_less_unsigned_true\n\t"
2375 "jne .Li386_less_unsigned_false\n\t"
2376 "cmpl %eax,(%esp)\n\t"
2377 "jb .Li386_less_unsigned_true\n\t"
2378 ".Li386_less_unsigned_false:\n\t"
2380 "jmp .Li386_less_unsigned_end\n\t"
2381 ".Li386_less_unsigned_true:\n\t"
2383 ".Li386_less_unsigned_end:\n\t"
2385 "lea 0x8(%esp),%esp");
2389 i386_emit_ref (int size
)
2394 EMIT_ASM32 (i386_ref1
,
2398 EMIT_ASM32 (i386_ref2
,
2402 EMIT_ASM32 (i386_ref4
,
2403 "movl (%eax),%eax");
2406 EMIT_ASM32 (i386_ref8
,
2407 "movl 4(%eax),%ebx\n\t"
2408 "movl (%eax),%eax");
2414 i386_emit_if_goto (int *offset_p
, int *size_p
)
2416 EMIT_ASM32 (i386_if_goto
,
2422 /* Don't trust the assembler to choose the right jump */
2423 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2426 *offset_p
= 11; /* be sure that this matches the sequence above */
2432 i386_emit_goto (int *offset_p
, int *size_p
)
2434 EMIT_ASM32 (i386_goto
,
2435 /* Don't trust the assembler to choose the right jump */
2436 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2444 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2446 int diff
= (to
- (from
+ size
));
2447 unsigned char buf
[sizeof (int)];
2449 /* We're only doing 4-byte sizes at the moment. */
2456 memcpy (buf
, &diff
, sizeof (int));
2457 target_write_memory (from
, buf
, sizeof (int));
2461 i386_emit_const (LONGEST num
)
2463 unsigned char buf
[16];
2465 CORE_ADDR buildaddr
= current_insn_ptr
;
2468 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2469 lo
= num
& 0xffffffff;
2470 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2472 hi
= ((num
>> 32) & 0xffffffff);
2475 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2476 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2481 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2483 append_insns (&buildaddr
, i
, buf
);
2484 current_insn_ptr
= buildaddr
;
2488 i386_emit_call (CORE_ADDR fn
)
2490 unsigned char buf
[16];
2492 CORE_ADDR buildaddr
;
2494 buildaddr
= current_insn_ptr
;
2496 buf
[i
++] = 0xe8; /* call <reladdr> */
2497 offset
= ((int) fn
) - (buildaddr
+ 5);
2498 memcpy (buf
+ 1, &offset
, 4);
2499 append_insns (&buildaddr
, 5, buf
);
2500 current_insn_ptr
= buildaddr
;
2504 i386_emit_reg (int reg
)
2506 unsigned char buf
[16];
2508 CORE_ADDR buildaddr
;
2510 EMIT_ASM32 (i386_reg_a
,
2512 buildaddr
= current_insn_ptr
;
2514 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2515 memcpy (&buf
[i
], ®
, sizeof (reg
));
2517 append_insns (&buildaddr
, i
, buf
);
2518 current_insn_ptr
= buildaddr
;
2519 EMIT_ASM32 (i386_reg_b
,
2520 "mov %eax,4(%esp)\n\t"
2521 "mov 8(%ebp),%eax\n\t"
2523 i386_emit_call (get_raw_reg_func_addr ());
2524 EMIT_ASM32 (i386_reg_c
,
2526 "lea 0x8(%esp),%esp");
2530 i386_emit_pop (void)
2532 EMIT_ASM32 (i386_pop
,
2538 i386_emit_stack_flush (void)
2540 EMIT_ASM32 (i386_stack_flush
,
2546 i386_emit_zero_ext (int arg
)
2551 EMIT_ASM32 (i386_zero_ext_8
,
2552 "and $0xff,%eax\n\t"
2556 EMIT_ASM32 (i386_zero_ext_16
,
2557 "and $0xffff,%eax\n\t"
2561 EMIT_ASM32 (i386_zero_ext_32
,
2570 i386_emit_swap (void)
2572 EMIT_ASM32 (i386_swap
,
2582 i386_emit_stack_adjust (int n
)
2584 unsigned char buf
[16];
2586 CORE_ADDR buildaddr
= current_insn_ptr
;
2589 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2593 append_insns (&buildaddr
, i
, buf
);
2594 current_insn_ptr
= buildaddr
;
2597 /* FN's prototype is `LONGEST(*fn)(int)'. */
2600 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2602 unsigned char buf
[16];
2604 CORE_ADDR buildaddr
;
2606 EMIT_ASM32 (i386_int_call_1_a
,
2607 /* Reserve a bit of stack space. */
2609 /* Put the one argument on the stack. */
2610 buildaddr
= current_insn_ptr
;
2612 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2615 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2617 append_insns (&buildaddr
, i
, buf
);
2618 current_insn_ptr
= buildaddr
;
2619 i386_emit_call (fn
);
2620 EMIT_ASM32 (i386_int_call_1_c
,
2622 "lea 0x8(%esp),%esp");
2625 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2628 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2630 unsigned char buf
[16];
2632 CORE_ADDR buildaddr
;
2634 EMIT_ASM32 (i386_void_call_2_a
,
2635 /* Preserve %eax only; we don't have to worry about %ebx. */
2637 /* Reserve a bit of stack space for arguments. */
2638 "sub $0x10,%esp\n\t"
2639 /* Copy "top" to the second argument position. (Note that
2640 we can't assume function won't scribble on its
2641 arguments, so don't try to restore from this.) */
2642 "mov %eax,4(%esp)\n\t"
2643 "mov %ebx,8(%esp)");
2644 /* Put the first argument on the stack. */
2645 buildaddr
= current_insn_ptr
;
2647 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2650 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2652 append_insns (&buildaddr
, i
, buf
);
2653 current_insn_ptr
= buildaddr
;
2654 i386_emit_call (fn
);
2655 EMIT_ASM32 (i386_void_call_2_b
,
2656 "lea 0x10(%esp),%esp\n\t"
2657 /* Restore original stack top. */
2663 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2666 /* Check low half first, more likely to be decider */
2667 "cmpl %eax,(%esp)\n\t"
2668 "jne .Leq_fallthru\n\t"
2669 "cmpl %ebx,4(%esp)\n\t"
2670 "jne .Leq_fallthru\n\t"
2671 "lea 0x8(%esp),%esp\n\t"
2674 /* jmp, but don't trust the assembler to choose the right jump */
2675 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2676 ".Leq_fallthru:\n\t"
2677 "lea 0x8(%esp),%esp\n\t"
2688 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2691 /* Check low half first, more likely to be decider */
2692 "cmpl %eax,(%esp)\n\t"
2694 "cmpl %ebx,4(%esp)\n\t"
2695 "je .Lne_fallthru\n\t"
2697 "lea 0x8(%esp),%esp\n\t"
2700 /* jmp, but don't trust the assembler to choose the right jump */
2701 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2702 ".Lne_fallthru:\n\t"
2703 "lea 0x8(%esp),%esp\n\t"
2714 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2717 "cmpl %ebx,4(%esp)\n\t"
2719 "jne .Llt_fallthru\n\t"
2720 "cmpl %eax,(%esp)\n\t"
2721 "jnl .Llt_fallthru\n\t"
2723 "lea 0x8(%esp),%esp\n\t"
2726 /* jmp, but don't trust the assembler to choose the right jump */
2727 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2728 ".Llt_fallthru:\n\t"
2729 "lea 0x8(%esp),%esp\n\t"
2740 i386_emit_le_goto (int *offset_p
, int *size_p
)
2743 "cmpl %ebx,4(%esp)\n\t"
2745 "jne .Lle_fallthru\n\t"
2746 "cmpl %eax,(%esp)\n\t"
2747 "jnle .Lle_fallthru\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2752 /* jmp, but don't trust the assembler to choose the right jump */
2753 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2754 ".Lle_fallthru:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2766 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2769 "cmpl %ebx,4(%esp)\n\t"
2771 "jne .Lgt_fallthru\n\t"
2772 "cmpl %eax,(%esp)\n\t"
2773 "jng .Lgt_fallthru\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2778 /* jmp, but don't trust the assembler to choose the right jump */
2779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2780 ".Lgt_fallthru:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2792 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2795 "cmpl %ebx,4(%esp)\n\t"
2797 "jne .Lge_fallthru\n\t"
2798 "cmpl %eax,(%esp)\n\t"
2799 "jnge .Lge_fallthru\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2804 /* jmp, but don't trust the assembler to choose the right jump */
2805 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2806 ".Lge_fallthru:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2817 struct emit_ops i386_emit_ops
=
2825 i386_emit_rsh_signed
,
2826 i386_emit_rsh_unsigned
,
2834 i386_emit_less_signed
,
2835 i386_emit_less_unsigned
,
2839 i386_write_goto_address
,
2844 i386_emit_stack_flush
,
2847 i386_emit_stack_adjust
,
2848 i386_emit_int_call_1
,
2849 i386_emit_void_call_2
,
2859 static struct emit_ops
*
2863 if (is_64bit_tdesc ())
2864 return &amd64_emit_ops
;
2867 return &i386_emit_ops
;
2870 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2873 x86_target::sw_breakpoint_from_kind (int kind
, int *size
)
2875 *size
= x86_breakpoint_len
;
2876 return x86_breakpoint
;
2880 x86_supports_range_stepping (void)
2885 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2889 x86_supports_hardware_single_step (void)
2895 x86_get_ipa_tdesc_idx (void)
2897 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2898 const struct target_desc
*tdesc
= regcache
->tdesc
;
2901 return amd64_get_ipa_tdesc_idx (tdesc
);
2904 if (tdesc
== tdesc_i386_linux_no_xml
)
2905 return X86_TDESC_SSE
;
2907 return i386_get_ipa_tdesc_idx (tdesc
);
2910 /* This is initialized assuming an amd64 target.
2911 x86_arch_setup will correct it for i386 or amd64 targets. */
2913 struct linux_target_ops the_low_target
=
2917 x86_stopped_by_watchpoint
,
2918 x86_stopped_data_address
,
2919 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2920 native i386 case (no registers smaller than an xfer unit), and are not
2921 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2924 /* need to fix up i386 siginfo if host is amd64 */
2926 x86_linux_new_process
,
2927 x86_linux_delete_process
,
2928 x86_linux_new_thread
,
2929 x86_linux_delete_thread
,
2931 x86_linux_prepare_to_resume
,
2932 x86_linux_process_qsupported
,
2933 x86_supports_tracepoints
,
2934 x86_get_thread_area
,
2935 x86_install_fast_tracepoint_jump_pad
,
2937 x86_get_min_fast_tracepoint_insn_len
,
2938 x86_supports_range_stepping
,
2939 x86_supports_hardware_single_step
,
2940 x86_get_syscall_trapinfo
,
2941 x86_get_ipa_tdesc_idx
,
2944 /* The linux target ops object. */
2946 linux_process_target
*the_linux_target
= &the_x86_target
;
2949 initialize_low_arch (void)
2951 /* Initialize the Linux target descriptions. */
2953 tdesc_amd64_linux_no_xml
= allocate_target_description ();
2954 copy_target_description (tdesc_amd64_linux_no_xml
,
2955 amd64_linux_read_description (X86_XSTATE_SSE_MASK
,
2957 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
2960 tdesc_i386_linux_no_xml
= allocate_target_description ();
2961 copy_target_description (tdesc_i386_linux_no_xml
,
2962 i386_linux_read_description (X86_XSTATE_SSE_MASK
));
2963 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
2965 initialize_regsets_info (&x86_regsets_info
);