/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

protected:

  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
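
/* For illustration (a sketch, not part of the interface): with the
   mapping above, collecting GDB register 0 (eax) out of a raw
   `struct user' buffer BUF reads REGSIZE bytes at byte offset
   i386_regmap[0], i.e. RAX * 8 on a 64-bit host:

     collect_register (regcache, 0, (char *) buf + i386_regmap[0]);

   Registers GDB knows about that have no slot in `struct user' are
   mapped to -1, as in x86_64_regmap below.  */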
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
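
/* A rough sketch of the caller's side, for orientation only:
   libthread_db resolves this symbol inside gdbserver and calls it
   roughly as

     void *base;
     if (ps_get_thread_area (ph, lwpid, idx, &base) == PS_OK)
       ... use BASE as the thread descriptor address ...

   where IDX is FS/GS on 64-bit targets, or a GDT index on 32-bit
   targets.  */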
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..471] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
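
/* A minimal sketch of how this offset is used (BUF is a hypothetical
   buffer holding a raw XSAVE block fetched with
   PTRACE_GETREGSET/NT_X86_XSTATE):

     uint64_t xcr0;
     memcpy (&xcr0, (gdb_byte *) buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	     sizeof (xcr0));
     if (xcr0 & X86_XSTATE_AVX)
       ... the OS has enabled the AVX state component ...
*/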
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
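
/* For orientation, the pad built above (and its i386 twin below) has
   this overall shape:

     <jump pad>:  save registers (including the PC pushed by hand)
		  acquire the collecting_t spin-lock
		  call gdb_collect in the in-process agent
		  release the spin-lock
		  restore registers
     <adjusted>:  relocated copy of the displaced instruction(s)
		  jmp back to <tracepoint address> + orig_size

   The tracepoint site itself is overwritten (by our caller) with a
   jump to the start of the pad.  */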
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   in bytes.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
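
/* To make the trick above concrete: EMIT_ASM (amd64_pop, "pop %rax")
   expands to roughly (a sketch, whitespace aside)

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
	      "\tstart_amd64_pop:\tpop %rax\n"
	      "\tend_amd64_pop:");

   i.e. the bytes are assembled into gdbserver itself (and jumped over
   at run time), then copied into the inferior's code pad.  */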
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "xor %rax,%rax\n\t"
	    "mov %cl,%al");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
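
/* The ".byte 0x0f, 0x85, ..." and ".byte 0xe9, ..." sequences above
   emit a jcc/jmp rel32 whose displacement is left as zero.  The
   *OFFSET_P/*SIZE_P outputs tell the caller at which byte of the
   emitted blob the 4-byte displacement sits (byte 10 of the if_goto
   sequence, byte 1 of the plain goto), so it can be patched later
   through amd64_write_goto_address below once the target address is
   known.  */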
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "xor %eax,%eax\n\t"
	      "mov %cl,%al");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}