/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#include "nat/amd64-linux-siginfo.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
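/* Note for readers: 0xe9 is the 5-byte "jmp rel32" encoding, and the
   0x66 operand-size prefix turns it into the 4-byte "jmp rel16" form.
   The zeroed displacement bytes are patched in when a jump pad or
   trampoline is wired up below.  */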
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
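/* With these constants, a thread's segment bases can be read or
   written directly, e.g.:

     unsigned long base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   which is exactly how the fs_base/gs_base registers are handled
   further below.  */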
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

protected:

  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
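/* To illustrate the mapping: GDB's register 0 is eax, and
   i386_regmap[0] is RAX * 8, the byte offset of the 64-bit RAX slot
   inside `struct user' on an amd64 host; the 32-bit value travels in
   the low half of that 8-byte slot.  */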
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* st0 ... st7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fctrl ... fop */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm0 ... xmm15 */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,					/* mxcsr */
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,			/* fs_base, gs_base */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid, (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
#ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
#endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
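/* 0xCC is the one-byte "int3" instruction, the standard x86 software
   breakpoint.  */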
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirror will become zeroed
     in the end before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
/* Nonzero if GDB advertised XML target description support in its
   qSupported query; see x86_linux_process_qsupported.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471) are the OS
  enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
  can use this mask together with the mask saved in the
  xstate_hdr_bytes to determine what states the processor/OS supports
  and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
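/* As a minimal illustration (not part of the original flow): given an
   XSAVE buffer BUF filled in via PTRACE_GETREGSET with NT_X86_XSTATE,
   the enabled-feature mask can be recovered with

     uint64_t xcr0;
     memcpy (&xcr0, (const char *) buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	     sizeof (xcr0));

   x86_linux_read_description below does the equivalent by indexing
   the buffer as an array of uint64_t.  */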
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

static const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
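/* Note that append_insns advances *TO past the bytes written, so
   successive calls lay down code end to end at the target address.  */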
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
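/* For example, push_opcode (&buf[0], "48 83 ec 18") parses the four
   hex byte tokens, stores 0x48 0x83 0xec 0x18 (the encoding of
   "sub $0x18,%rsp") into BUF, and returns 4.  */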
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the resulting jump instruction to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += sizeof (tpaddr);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes the resulting jump instruction to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to the user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
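/* For example, EMIT_ASM (amd64_add, "add (%rsp),%rax\n\t" ...)
   assembles the instructions between the start_amd64_add and
   end_amd64_add labels into this function's own text, then copies
   those bytes into the bytecode buffer at current_insn_ptr; the
   leading jmp keeps them from being executed in place.  */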
#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}