/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
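
/* jump_insn is a jmp with a 32-bit relative displacement (opcode
   0xe9); small_jump_insn is the same jump with the 0x66 operand-size
   prefix, giving a 16-bit displacement.  The zeroed displacement
   bytes are patched in when the jump pads are installed below.  */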

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,			/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
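
/* Entries of -1 in x86_64_regmap mark registers that have no slot in
   `struct user' and so cannot be transferred via the PTRACE_PEEKUSER
   offsets; x86_fill_gregset and x86_store_gregset below skip them.  */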

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	}

      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;
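
    /* The low three bits of a segment selector hold the requested
       privilege level and the table indicator; shifting them off
       leaves the GDT entry number that PTRACE_GET_THREAD_AREA
       expects.  */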

    if (ptrace (PTRACE_GET_THREAD_AREA,
		(long) lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */
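
/* Each entry below is { get_request, set_request, nt_type, size, type,
   fill_function, store_function }; the size of 0 on the NT_X86_XSTATE
   entry means the size is only known at runtime, once XCR0 has been
   read (see x86_linux_read_description).  */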

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
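
/* 0xCC is the single-byte int3 trap instruction; writing it over the
   first byte of an instruction is how software breakpoints are
   planted on x86.  */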

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let forked/vforked/cloned tasks inherit hardware debug registers
     from the parent.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors are zeroed in the
     end before detaching the forked off process, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] hold the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
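
/* For example, once an NT_X86_XSTATE regset has been fetched into a
   uint64_t buffer, the enabled-feature mask can be read as:

     xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   which is exactly what x86_linux_read_description does below.  */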

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread = find_any_thread_of_pid (pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
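
/* For example, push_opcode (&buf[i], "48 89 e6") appends the three
   bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) to BUF and returns 3, hence
   the usual idiom below: i += push_opcode (&buf[i], ...).  */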

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);
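
  /* The two words just stored form the collecting_t object the
     in-process agent inspects: the tracepoint object pointer, and a
     per-thread identifier (the thread control block address read from
     %fs:0x0) used to tell collecting threads apart.  */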

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in a shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
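
/* For example, EMIT_ASM (amd64_pop, "pop %rax") assembles the "pop
   %rax" bytes between the labels start_amd64_pop and end_amd64_pop in
   this very object file, and add_insns then copies those bytes into
   the inferior at current_insn_ptr.  */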

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
)
2193 EMIT_ASM32 (i386_ext_8
,
2196 "movl %eax,%ebx\n\t"
2200 EMIT_ASM32 (i386_ext_16
,
2202 "movl %eax,%ebx\n\t"
2206 EMIT_ASM32 (i386_ext_32
,
2207 "movl %eax,%ebx\n\t"

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

#if GDB_SELF_TEST
  initialize_low_tdesc ();
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}