/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "common/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "common/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
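/* These buffers are the jump templates used when building the fast
   tracepoint jump pads below: jump_insn is a 5-byte "jmp rel32"
   (opcode 0xe9) and small_jump_insn is a 4-byte "jmp rel16" (an
   operand-size prefix 0x66 followed by 0xe9).  The zeroed bytes are
   patched with the displacement before the instruction is written
   into the inferior.  */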
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
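/* X86_64_USER_REGS is the number of 8-byte slots in the 64-bit
   user_regs_struct (GS from <sys/reg.h> indexes its last member); it
   is used below to zero the whole buffer before collecting the
   registers of a 32-bit inferior.  */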
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
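/* Note on the PTRACE_GET_THREAD_AREA path above: DESC is laid out like
   the kernel's struct user_desc, so desc[1] is its base_addr field,
   i.e. the segment base that libthread_db is asking for.  */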
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid, (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
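/* The table above is terminated by NULL_REGSET.  The generic regset
   code in linux-low.c walks it to fetch and store registers; entries
   whose size is left at 0 (see x86_linux_read_description below) are
   skipped.  */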
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
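/* 0xCC is the one-byte INT3 instruction, so a software breakpoint can
   be planted regardless of instruction alignment or length.  */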
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
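/* x86_dr_low plugs the Linux ptrace debug-register accessors from
   nat/x86-linux-dregs.c into the generic x86 hardware breakpoint and
   watchpoint code; the final field is the debug register size on this
   host.  */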
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes area [464..471] hold the OS
  enabled extended state mask, which is the same as the extended control
  register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this
  mask together with the mask saved in the xstate_hdr_bytes to determine
  what states the processor/OS supports and what state, used or
  initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    the_low_target.arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be called
   if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
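/* push_opcode parses a string of space-separated hex byte values such
   as "48 89 e6" and appends the corresponding bytes to BUF, returning
   how many bytes were emitted; the jump pad builders below use it to
   keep the hand-assembled instruction sequences readable.  */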
1036 /* Build a jump pad that saves registers and calls a collection
1037 function. Writes a jump instruction to the jump pad to
1038 JJUMPAD_INSN. The caller is responsible to write it in at the
1039 tracepoint address. */
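/* Roughly, the pad built below: push all general-purpose registers and
   the flags, push TPADDR as the collected PC, carve out stack space for
   a collecting_t object and take the jump-pad spin lock, call the
   collector with the tracepoint and the saved-register block as
   arguments, release the lock, restore the saved state, then execute
   the relocated original instruction and jump back to the program.  */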
1042 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1043 CORE_ADDR collector
,
1046 CORE_ADDR
*jump_entry
,
1047 CORE_ADDR
*trampoline
,
1048 ULONGEST
*trampoline_size
,
1049 unsigned char *jjump_pad_insn
,
1050 ULONGEST
*jjump_pad_insn_size
,
1051 CORE_ADDR
*adjusted_insn_addr
,
1052 CORE_ADDR
*adjusted_insn_addr_end
,
1055 unsigned char buf
[40];
1059 CORE_ADDR buildaddr
= *jump_entry
;
1061 /* Build the jump pad. */
1063 /* First, do tracepoint data collection. Save registers. */
1065 /* Need to ensure stack pointer saved first. */
1066 buf
[i
++] = 0x54; /* push %rsp */
1067 buf
[i
++] = 0x55; /* push %rbp */
1068 buf
[i
++] = 0x57; /* push %rdi */
1069 buf
[i
++] = 0x56; /* push %rsi */
1070 buf
[i
++] = 0x52; /* push %rdx */
1071 buf
[i
++] = 0x51; /* push %rcx */
1072 buf
[i
++] = 0x53; /* push %rbx */
1073 buf
[i
++] = 0x50; /* push %rax */
1074 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1075 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1076 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1077 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1078 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1079 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1080 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1081 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1082 buf
[i
++] = 0x9c; /* pushfq */
1083 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1085 memcpy (buf
+ i
, &tpaddr
, 8);
1087 buf
[i
++] = 0x57; /* push %rdi */
1088 append_insns (&buildaddr
, i
, buf
);
1090 /* Stack space for the collecting_t object. */
1092 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1093 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1094 memcpy (buf
+ i
, &tpoint
, 8);
1096 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1097 i
+= push_opcode (&buf
[i
],
1098 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1099 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1100 append_insns (&buildaddr
, i
, buf
);
1104 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1105 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1107 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1108 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1109 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1110 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1111 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1112 append_insns (&buildaddr
, i
, buf
);
1114 /* Set up the gdb_collect call. */
1115 /* At this point, (stack pointer + 0x18) is the base of our saved
1119 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1120 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1122 /* tpoint address may be 64-bit wide. */
1123 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1124 memcpy (buf
+ i
, &tpoint
, 8);
1126 append_insns (&buildaddr
, i
, buf
);
1128 /* The collector function being in the shared library, may be
1129 >31-bits away off the jump pad. */
1131 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1132 memcpy (buf
+ i
, &collector
, 8);
1134 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1135 append_insns (&buildaddr
, i
, buf
);
1137 /* Clear the spin-lock. */
1139 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1140 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1141 memcpy (buf
+ i
, &lockaddr
, 8);
1143 append_insns (&buildaddr
, i
, buf
);
1145 /* Remove stack that had been used for the collect_t object. */
1147 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1148 append_insns (&buildaddr
, i
, buf
);
1150 /* Restore register state. */
1152 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1156 buf
[i
++] = 0x9d; /* popfq */
1157 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1158 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1159 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1160 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1161 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1162 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1163 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1164 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1165 buf
[i
++] = 0x58; /* pop %rax */
1166 buf
[i
++] = 0x5b; /* pop %rbx */
1167 buf
[i
++] = 0x59; /* pop %rcx */
1168 buf
[i
++] = 0x5a; /* pop %rdx */
1169 buf
[i
++] = 0x5e; /* pop %rsi */
1170 buf
[i
++] = 0x5f; /* pop %rdi */
1171 buf
[i
++] = 0x5d; /* pop %rbp */
1172 buf
[i
++] = 0x5c; /* pop %rsp */
1173 append_insns (&buildaddr
, i
, buf
);
1175 /* Now, adjust the original instruction to execute in the jump
1177 *adjusted_insn_addr
= buildaddr
;
1178 relocate_instruction (&buildaddr
, tpaddr
);
1179 *adjusted_insn_addr_end
= buildaddr
;
1181 /* Finally, write a jump back to the program. */
1183 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1184 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1187 "E.Jump back from jump pad too far from tracepoint "
1188 "(offset 0x%" PRIx64
" > int32).", loffset
);
1192 offset
= (int) loffset
;
1193 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1194 memcpy (buf
+ 1, &offset
, 4);
1195 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1197 /* The jump pad is now built. Wire in a jump to our jump pad. This
1198 is always done last (by our caller actually), so that we can
1199 install fast tracepoints with threads running. This relies on
1200 the agent's atomic write support. */
1201 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1202 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1205 "E.Jump pad too far from tracepoint "
1206 "(offset 0x%" PRIx64
" > int32).", loffset
);
1210 offset
= (int) loffset
;
1212 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1213 memcpy (buf
+ 1, &offset
, 4);
1214 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1215 *jjump_pad_insn_size
= sizeof (jump_insn
);
1217 /* Return the end address of our pad. */
1218 *jump_entry
= buildaddr
;
1223 #endif /* __x86_64__ */
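/* End of the x86-64-only jump pad builder; the 32-bit variant below is
   compiled on both hosts and is selected at run time by
   x86_install_fast_tracepoint_jump_pad.  */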
1225 /* Build a jump pad that saves registers and calls a collection
1226 function. Writes a jump instruction to the jump pad to
1227 JJUMPAD_INSN. The caller is responsible to write it in at the
1228 tracepoint address. */
1231 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1232 CORE_ADDR collector
,
1235 CORE_ADDR
*jump_entry
,
1236 CORE_ADDR
*trampoline
,
1237 ULONGEST
*trampoline_size
,
1238 unsigned char *jjump_pad_insn
,
1239 ULONGEST
*jjump_pad_insn_size
,
1240 CORE_ADDR
*adjusted_insn_addr
,
1241 CORE_ADDR
*adjusted_insn_addr_end
,
1244 unsigned char buf
[0x100];
1246 CORE_ADDR buildaddr
= *jump_entry
;
1248 /* Build the jump pad. */
1250 /* First, do tracepoint data collection. Save registers. */
1252 buf
[i
++] = 0x60; /* pushad */
1253 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1254 *((int *)(buf
+ i
)) = (int) tpaddr
;
1256 buf
[i
++] = 0x9c; /* pushf */
1257 buf
[i
++] = 0x1e; /* push %ds */
1258 buf
[i
++] = 0x06; /* push %es */
1259 buf
[i
++] = 0x0f; /* push %fs */
1261 buf
[i
++] = 0x0f; /* push %gs */
1263 buf
[i
++] = 0x16; /* push %ss */
1264 buf
[i
++] = 0x0e; /* push %cs */
1265 append_insns (&buildaddr
, i
, buf
);
1267 /* Stack space for the collecting_t object. */
1269 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1271 /* Build the object. */
1272 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1273 memcpy (buf
+ i
, &tpoint
, 4);
1275 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1277 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1278 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1279 append_insns (&buildaddr
, i
, buf
);
1281 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1282 If we cared for it, this could be using xchg alternatively. */
1285 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1286 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1288 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1290 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1291 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1292 append_insns (&buildaddr
, i
, buf
);
1295 /* Set up arguments to the gdb_collect call. */
1297 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1298 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1299 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1300 append_insns (&buildaddr
, i
, buf
);
1303 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1304 append_insns (&buildaddr
, i
, buf
);
1307 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1308 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1310 append_insns (&buildaddr
, i
, buf
);
1312 buf
[0] = 0xe8; /* call <reladdr> */
1313 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1314 memcpy (buf
+ 1, &offset
, 4);
1315 append_insns (&buildaddr
, 5, buf
);
1316 /* Clean up after the call. */
1317 buf
[0] = 0x83; /* add $0x8,%esp */
1320 append_insns (&buildaddr
, 3, buf
);
1323 /* Clear the spin-lock. This would need the LOCK prefix on older
1326 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1327 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1328 memcpy (buf
+ i
, &lockaddr
, 4);
1330 append_insns (&buildaddr
, i
, buf
);
1333 /* Remove stack that had been used for the collect_t object. */
1335 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1336 append_insns (&buildaddr
, i
, buf
);
1339 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1342 buf
[i
++] = 0x17; /* pop %ss */
1343 buf
[i
++] = 0x0f; /* pop %gs */
1345 buf
[i
++] = 0x0f; /* pop %fs */
1347 buf
[i
++] = 0x07; /* pop %es */
1348 buf
[i
++] = 0x1f; /* pop %ds */
1349 buf
[i
++] = 0x9d; /* popf */
1350 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1353 buf
[i
++] = 0x61; /* popad */
1354 append_insns (&buildaddr
, i
, buf
);
1356 /* Now, adjust the original instruction to execute in the jump
1358 *adjusted_insn_addr
= buildaddr
;
1359 relocate_instruction (&buildaddr
, tpaddr
);
1360 *adjusted_insn_addr_end
= buildaddr
;
1362 /* Write the jump back to the program. */
1363 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1364 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1365 memcpy (buf
+ 1, &offset
, 4);
1366 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1368 /* The jump pad is now built. Wire in a jump to our jump pad. This
1369 is always done last (by our caller actually), so that we can
1370 install fast tracepoints with threads running. This relies on
1371 the agent's atomic write support. */
1374 /* Create a trampoline. */
1375 *trampoline_size
= sizeof (jump_insn
);
1376 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1378 /* No trampoline space available. */
1380 "E.Cannot allocate trampoline space needed for fast "
1381 "tracepoints on 4-byte instructions.");
1385 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1386 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1387 memcpy (buf
+ 1, &offset
, 4);
1388 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1390 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1391 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1392 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1393 memcpy (buf
+ 2, &offset
, 2);
1394 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1395 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1399 /* Else use a 32-bit relative jump instruction. */
1400 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1401 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1402 memcpy (buf
+ 1, &offset
, 4);
1403 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1404 *jjump_pad_insn_size
= sizeof (jump_insn
);
1407 /* Return the end address of our pad. */
1408 *jump_entry
= buildaddr
;
1414 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1415 CORE_ADDR collector
,
1418 CORE_ADDR
*jump_entry
,
1419 CORE_ADDR
*trampoline
,
1420 ULONGEST
*trampoline_size
,
1421 unsigned char *jjump_pad_insn
,
1422 ULONGEST
*jjump_pad_insn_size
,
1423 CORE_ADDR
*adjusted_insn_addr
,
1424 CORE_ADDR
*adjusted_insn_addr_end
,
1428 if (is_64bit_tdesc ())
1429 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1430 collector
, lockaddr
,
1431 orig_size
, jump_entry
,
1432 trampoline
, trampoline_size
,
1434 jjump_pad_insn_size
,
1436 adjusted_insn_addr_end
,
1440 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1441 collector
, lockaddr
,
1442 orig_size
, jump_entry
,
1443 trampoline
, trampoline_size
,
1445 jjump_pad_insn_size
,
1447 adjusted_insn_addr_end
,
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1511 /* Our general strategy for emitting code is to avoid specifying raw
1512 bytes whenever possible, and instead copy a block of inline asm
1513 that is embedded in the function. This is a little messy, because
1514 we need to keep the compiler from discarding what looks like dead
1515 code, plus suppress various warnings. */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
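/* The EMIT_ASM/EMIT_ASM32 macros above rely on the start_NAME/end_NAME
   labels planted around the inline assembly: the bytes the compiler
   produced between the two labels are copied into the inferior's
   compiled-expression buffer by add_insns, so each emit function below
   simply wraps a short assembly sequence.  */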
1552 amd64_emit_prologue (void)
1554 EMIT_ASM (amd64_prologue
,
1556 "movq %rsp,%rbp\n\t"
1557 "sub $0x20,%rsp\n\t"
1558 "movq %rdi,-8(%rbp)\n\t"
1559 "movq %rsi,-16(%rbp)");
1564 amd64_emit_epilogue (void)
1566 EMIT_ASM (amd64_epilogue
,
1567 "movq -16(%rbp),%rdi\n\t"
1568 "movq %rax,(%rdi)\n\t"
1575 amd64_emit_add (void)
1577 EMIT_ASM (amd64_add
,
1578 "add (%rsp),%rax\n\t"
1579 "lea 0x8(%rsp),%rsp");
1583 amd64_emit_sub (void)
1585 EMIT_ASM (amd64_sub
,
1586 "sub %rax,(%rsp)\n\t"
1591 amd64_emit_mul (void)
1597 amd64_emit_lsh (void)
1603 amd64_emit_rsh_signed (void)
1609 amd64_emit_rsh_unsigned (void)
1615 amd64_emit_ext (int arg
)
1620 EMIT_ASM (amd64_ext_8
,
1626 EMIT_ASM (amd64_ext_16
,
1631 EMIT_ASM (amd64_ext_32
,
1640 amd64_emit_log_not (void)
1642 EMIT_ASM (amd64_log_not
,
1643 "test %rax,%rax\n\t"
1649 amd64_emit_bit_and (void)
1651 EMIT_ASM (amd64_and
,
1652 "and (%rsp),%rax\n\t"
1653 "lea 0x8(%rsp),%rsp");
1657 amd64_emit_bit_or (void)
1660 "or (%rsp),%rax\n\t"
1661 "lea 0x8(%rsp),%rsp");
1665 amd64_emit_bit_xor (void)
1667 EMIT_ASM (amd64_xor
,
1668 "xor (%rsp),%rax\n\t"
1669 "lea 0x8(%rsp),%rsp");
1673 amd64_emit_bit_not (void)
1675 EMIT_ASM (amd64_bit_not
,
1676 "xorq $0xffffffffffffffff,%rax");
1680 amd64_emit_equal (void)
1682 EMIT_ASM (amd64_equal
,
1683 "cmp %rax,(%rsp)\n\t"
1684 "je .Lamd64_equal_true\n\t"
1686 "jmp .Lamd64_equal_end\n\t"
1687 ".Lamd64_equal_true:\n\t"
1689 ".Lamd64_equal_end:\n\t"
1690 "lea 0x8(%rsp),%rsp");
1694 amd64_emit_less_signed (void)
1696 EMIT_ASM (amd64_less_signed
,
1697 "cmp %rax,(%rsp)\n\t"
1698 "jl .Lamd64_less_signed_true\n\t"
1700 "jmp .Lamd64_less_signed_end\n\t"
1701 ".Lamd64_less_signed_true:\n\t"
1703 ".Lamd64_less_signed_end:\n\t"
1704 "lea 0x8(%rsp),%rsp");
1708 amd64_emit_less_unsigned (void)
1710 EMIT_ASM (amd64_less_unsigned
,
1711 "cmp %rax,(%rsp)\n\t"
1712 "jb .Lamd64_less_unsigned_true\n\t"
1714 "jmp .Lamd64_less_unsigned_end\n\t"
1715 ".Lamd64_less_unsigned_true:\n\t"
1717 ".Lamd64_less_unsigned_end:\n\t"
1718 "lea 0x8(%rsp),%rsp");
1722 amd64_emit_ref (int size
)
1727 EMIT_ASM (amd64_ref1
,
1731 EMIT_ASM (amd64_ref2
,
1735 EMIT_ASM (amd64_ref4
,
1736 "movl (%rax),%eax");
1739 EMIT_ASM (amd64_ref8
,
1740 "movq (%rax),%rax");
1746 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1748 EMIT_ASM (amd64_if_goto
,
1752 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1760 amd64_emit_goto (int *offset_p
, int *size_p
)
1762 EMIT_ASM (amd64_goto
,
1763 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1771 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1773 int diff
= (to
- (from
+ size
));
1774 unsigned char buf
[sizeof (int)];
1782 memcpy (buf
, &diff
, sizeof (int));
1783 write_inferior_memory (from
, buf
, sizeof (int));
1787 amd64_emit_const (LONGEST num
)
1789 unsigned char buf
[16];
1791 CORE_ADDR buildaddr
= current_insn_ptr
;
1794 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1795 memcpy (&buf
[i
], &num
, sizeof (num
));
1797 append_insns (&buildaddr
, i
, buf
);
1798 current_insn_ptr
= buildaddr
;
1802 amd64_emit_call (CORE_ADDR fn
)
1804 unsigned char buf
[16];
1806 CORE_ADDR buildaddr
;
1809 /* The destination function being in the shared library, may be
1810 >31-bits away off the compiled code pad. */
1812 buildaddr
= current_insn_ptr
;
1814 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1818 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1820 /* Offset is too large for a call. Use callq, but that requires
1821 a register, so avoid it if possible. Use r10, since it is
1822 call-clobbered, we don't have to push/pop it. */
1823 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1825 memcpy (buf
+ i
, &fn
, 8);
1827 buf
[i
++] = 0xff; /* callq *%r10 */
1832 int offset32
= offset64
; /* we know we can't overflow here. */
1834 buf
[i
++] = 0xe8; /* call <reladdr> */
1835 memcpy (buf
+ i
, &offset32
, 4);
1839 append_insns (&buildaddr
, i
, buf
);
1840 current_insn_ptr
= buildaddr
;
1844 amd64_emit_reg (int reg
)
1846 unsigned char buf
[16];
1848 CORE_ADDR buildaddr
;
1850 /* Assume raw_regs is still in %rdi. */
1851 buildaddr
= current_insn_ptr
;
1853 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1854 memcpy (&buf
[i
], ®
, sizeof (reg
));
1856 append_insns (&buildaddr
, i
, buf
);
1857 current_insn_ptr
= buildaddr
;
1858 amd64_emit_call (get_raw_reg_func_addr ());
1862 amd64_emit_pop (void)
1864 EMIT_ASM (amd64_pop
,
1869 amd64_emit_stack_flush (void)
1871 EMIT_ASM (amd64_stack_flush
,
1876 amd64_emit_zero_ext (int arg
)
1881 EMIT_ASM (amd64_zero_ext_8
,
1885 EMIT_ASM (amd64_zero_ext_16
,
1886 "and $0xffff,%rax");
1889 EMIT_ASM (amd64_zero_ext_32
,
1890 "mov $0xffffffff,%rcx\n\t"
1899 amd64_emit_swap (void)
1901 EMIT_ASM (amd64_swap
,
1908 amd64_emit_stack_adjust (int n
)
1910 unsigned char buf
[16];
1912 CORE_ADDR buildaddr
= current_insn_ptr
;
1915 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1919 /* This only handles adjustments up to 16, but we don't expect any more. */
1921 append_insns (&buildaddr
, i
, buf
);
1922 current_insn_ptr
= buildaddr
;
1925 /* FN's prototype is `LONGEST(*fn)(int)'. */
1928 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1930 unsigned char buf
[16];
1932 CORE_ADDR buildaddr
;
1934 buildaddr
= current_insn_ptr
;
1936 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1937 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1939 append_insns (&buildaddr
, i
, buf
);
1940 current_insn_ptr
= buildaddr
;
1941 amd64_emit_call (fn
);
1944 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1947 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
1949 unsigned char buf
[16];
1951 CORE_ADDR buildaddr
;
1953 buildaddr
= current_insn_ptr
;
1955 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1956 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1958 append_insns (&buildaddr
, i
, buf
);
1959 current_insn_ptr
= buildaddr
;
1960 EMIT_ASM (amd64_void_call_2_a
,
1961 /* Save away a copy of the stack top. */
1963 /* Also pass top as the second argument. */
1965 amd64_emit_call (fn
);
1966 EMIT_ASM (amd64_void_call_2_b
,
1967 /* Restore the stack top, %rax may have been trashed. */
1972 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
1975 "cmp %rax,(%rsp)\n\t"
1976 "jne .Lamd64_eq_fallthru\n\t"
1977 "lea 0x8(%rsp),%rsp\n\t"
1979 /* jmp, but don't trust the assembler to choose the right jump */
1980 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1981 ".Lamd64_eq_fallthru:\n\t"
1982 "lea 0x8(%rsp),%rsp\n\t"
1992 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
1995 "cmp %rax,(%rsp)\n\t"
1996 "je .Lamd64_ne_fallthru\n\t"
1997 "lea 0x8(%rsp),%rsp\n\t"
1999 /* jmp, but don't trust the assembler to choose the right jump */
2000 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2001 ".Lamd64_ne_fallthru:\n\t"
2002 "lea 0x8(%rsp),%rsp\n\t"
2012 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2015 "cmp %rax,(%rsp)\n\t"
2016 "jnl .Lamd64_lt_fallthru\n\t"
2017 "lea 0x8(%rsp),%rsp\n\t"
2019 /* jmp, but don't trust the assembler to choose the right jump */
2020 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2021 ".Lamd64_lt_fallthru:\n\t"
2022 "lea 0x8(%rsp),%rsp\n\t"
2032 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2035 "cmp %rax,(%rsp)\n\t"
2036 "jnle .Lamd64_le_fallthru\n\t"
2037 "lea 0x8(%rsp),%rsp\n\t"
2039 /* jmp, but don't trust the assembler to choose the right jump */
2040 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2041 ".Lamd64_le_fallthru:\n\t"
2042 "lea 0x8(%rsp),%rsp\n\t"
2052 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2055 "cmp %rax,(%rsp)\n\t"
2056 "jng .Lamd64_gt_fallthru\n\t"
2057 "lea 0x8(%rsp),%rsp\n\t"
2059 /* jmp, but don't trust the assembler to choose the right jump */
2060 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2061 ".Lamd64_gt_fallthru:\n\t"
2062 "lea 0x8(%rsp),%rsp\n\t"
2072 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2075 "cmp %rax,(%rsp)\n\t"
2076 "jnge .Lamd64_ge_fallthru\n\t"
2077 ".Lamd64_ge_jump:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2080 /* jmp, but don't trust the assembler to choose the right jump */
2081 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2082 ".Lamd64_ge_fallthru:\n\t"
2083 "lea 0x8(%rsp),%rsp\n\t"
2092 struct emit_ops amd64_emit_ops
=
2094 amd64_emit_prologue
,
2095 amd64_emit_epilogue
,
2100 amd64_emit_rsh_signed
,
2101 amd64_emit_rsh_unsigned
,
2109 amd64_emit_less_signed
,
2110 amd64_emit_less_unsigned
,
2114 amd64_write_goto_address
,
2119 amd64_emit_stack_flush
,
2120 amd64_emit_zero_ext
,
2122 amd64_emit_stack_adjust
,
2123 amd64_emit_int_call_1
,
2124 amd64_emit_void_call_2
,
2133 #endif /* __x86_64__ */
2136 i386_emit_prologue (void)
2138 EMIT_ASM32 (i386_prologue
,
2142 /* At this point, the raw regs base address is at 8(%ebp), and the
2143 value pointer is at 12(%ebp). */
2147 i386_emit_epilogue (void)
2149 EMIT_ASM32 (i386_epilogue
,
2150 "mov 12(%ebp),%ecx\n\t"
2151 "mov %eax,(%ecx)\n\t"
2152 "mov %ebx,0x4(%ecx)\n\t"
2160 i386_emit_add (void)
2162 EMIT_ASM32 (i386_add
,
2163 "add (%esp),%eax\n\t"
2164 "adc 0x4(%esp),%ebx\n\t"
2165 "lea 0x8(%esp),%esp");
2169 i386_emit_sub (void)
2171 EMIT_ASM32 (i386_sub
,
2172 "subl %eax,(%esp)\n\t"
2173 "sbbl %ebx,4(%esp)\n\t"
2179 i386_emit_mul (void)
2185 i386_emit_lsh (void)
2191 i386_emit_rsh_signed (void)
2197 i386_emit_rsh_unsigned (void)
2203 i386_emit_ext (int arg
)
2208 EMIT_ASM32 (i386_ext_8
,
2211 "movl %eax,%ebx\n\t"
2215 EMIT_ASM32 (i386_ext_16
,
2217 "movl %eax,%ebx\n\t"
2221 EMIT_ASM32 (i386_ext_32
,
2222 "movl %eax,%ebx\n\t"
2231 i386_emit_log_not (void)
2233 EMIT_ASM32 (i386_log_not
,
2235 "test %eax,%eax\n\t"
2242 i386_emit_bit_and (void)
2244 EMIT_ASM32 (i386_and
,
2245 "and (%esp),%eax\n\t"
2246 "and 0x4(%esp),%ebx\n\t"
2247 "lea 0x8(%esp),%esp");
2251 i386_emit_bit_or (void)
2253 EMIT_ASM32 (i386_or
,
2254 "or (%esp),%eax\n\t"
2255 "or 0x4(%esp),%ebx\n\t"
2256 "lea 0x8(%esp),%esp");
2260 i386_emit_bit_xor (void)
2262 EMIT_ASM32 (i386_xor
,
2263 "xor (%esp),%eax\n\t"
2264 "xor 0x4(%esp),%ebx\n\t"
2265 "lea 0x8(%esp),%esp");
2269 i386_emit_bit_not (void)
2271 EMIT_ASM32 (i386_bit_not
,
2272 "xor $0xffffffff,%eax\n\t"
2273 "xor $0xffffffff,%ebx\n\t");
2277 i386_emit_equal (void)
2279 EMIT_ASM32 (i386_equal
,
2280 "cmpl %ebx,4(%esp)\n\t"
2281 "jne .Li386_equal_false\n\t"
2282 "cmpl %eax,(%esp)\n\t"
2283 "je .Li386_equal_true\n\t"
2284 ".Li386_equal_false:\n\t"
2286 "jmp .Li386_equal_end\n\t"
2287 ".Li386_equal_true:\n\t"
2289 ".Li386_equal_end:\n\t"
2291 "lea 0x8(%esp),%esp");
2295 i386_emit_less_signed (void)
2297 EMIT_ASM32 (i386_less_signed
,
2298 "cmpl %ebx,4(%esp)\n\t"
2299 "jl .Li386_less_signed_true\n\t"
2300 "jne .Li386_less_signed_false\n\t"
2301 "cmpl %eax,(%esp)\n\t"
2302 "jl .Li386_less_signed_true\n\t"
2303 ".Li386_less_signed_false:\n\t"
2305 "jmp .Li386_less_signed_end\n\t"
2306 ".Li386_less_signed_true:\n\t"
2308 ".Li386_less_signed_end:\n\t"
2310 "lea 0x8(%esp),%esp");
2314 i386_emit_less_unsigned (void)
2316 EMIT_ASM32 (i386_less_unsigned
,
2317 "cmpl %ebx,4(%esp)\n\t"
2318 "jb .Li386_less_unsigned_true\n\t"
2319 "jne .Li386_less_unsigned_false\n\t"
2320 "cmpl %eax,(%esp)\n\t"
2321 "jb .Li386_less_unsigned_true\n\t"
2322 ".Li386_less_unsigned_false:\n\t"
2324 "jmp .Li386_less_unsigned_end\n\t"
2325 ".Li386_less_unsigned_true:\n\t"
2327 ".Li386_less_unsigned_end:\n\t"
2329 "lea 0x8(%esp),%esp");
2333 i386_emit_ref (int size
)
2338 EMIT_ASM32 (i386_ref1
,
2342 EMIT_ASM32 (i386_ref2
,
2346 EMIT_ASM32 (i386_ref4
,
2347 "movl (%eax),%eax");
2350 EMIT_ASM32 (i386_ref8
,
2351 "movl 4(%eax),%ebx\n\t"
2352 "movl (%eax),%eax");
2358 i386_emit_if_goto (int *offset_p
, int *size_p
)
2360 EMIT_ASM32 (i386_if_goto
,
2366 /* Don't trust the assembler to choose the right jump */
2367 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2370 *offset_p
= 11; /* be sure that this matches the sequence above */
2376 i386_emit_goto (int *offset_p
, int *size_p
)
2378 EMIT_ASM32 (i386_goto
,
2379 /* Don't trust the assembler to choose the right jump */
2380 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2388 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2390 int diff
= (to
- (from
+ size
));
2391 unsigned char buf
[sizeof (int)];
2393 /* We're only doing 4-byte sizes at the moment. */
2400 memcpy (buf
, &diff
, sizeof (int));
2401 write_inferior_memory (from
, buf
, sizeof (int));
2405 i386_emit_const (LONGEST num
)
2407 unsigned char buf
[16];
2409 CORE_ADDR buildaddr
= current_insn_ptr
;
2412 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2413 lo
= num
& 0xffffffff;
2414 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2416 hi
= ((num
>> 32) & 0xffffffff);
2419 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2420 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2425 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2427 append_insns (&buildaddr
, i
, buf
);
2428 current_insn_ptr
= buildaddr
;
2432 i386_emit_call (CORE_ADDR fn
)
2434 unsigned char buf
[16];
2436 CORE_ADDR buildaddr
;
2438 buildaddr
= current_insn_ptr
;
2440 buf
[i
++] = 0xe8; /* call <reladdr> */
2441 offset
= ((int) fn
) - (buildaddr
+ 5);
2442 memcpy (buf
+ 1, &offset
, 4);
2443 append_insns (&buildaddr
, 5, buf
);
2444 current_insn_ptr
= buildaddr
;
2448 i386_emit_reg (int reg
)
2450 unsigned char buf
[16];
2452 CORE_ADDR buildaddr
;
2454 EMIT_ASM32 (i386_reg_a
,
2456 buildaddr
= current_insn_ptr
;
2458 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2459 memcpy (&buf
[i
], ®
, sizeof (reg
));
2461 append_insns (&buildaddr
, i
, buf
);
2462 current_insn_ptr
= buildaddr
;
2463 EMIT_ASM32 (i386_reg_b
,
2464 "mov %eax,4(%esp)\n\t"
2465 "mov 8(%ebp),%eax\n\t"
2467 i386_emit_call (get_raw_reg_func_addr ());
2468 EMIT_ASM32 (i386_reg_c
,
2470 "lea 0x8(%esp),%esp");
2474 i386_emit_pop (void)
2476 EMIT_ASM32 (i386_pop
,
2482 i386_emit_stack_flush (void)
2484 EMIT_ASM32 (i386_stack_flush
,
2490 i386_emit_zero_ext (int arg
)
2495 EMIT_ASM32 (i386_zero_ext_8
,
2496 "and $0xff,%eax\n\t"
2500 EMIT_ASM32 (i386_zero_ext_16
,
2501 "and $0xffff,%eax\n\t"
2505 EMIT_ASM32 (i386_zero_ext_32
,
2514 i386_emit_swap (void)
2516 EMIT_ASM32 (i386_swap
,
2526 i386_emit_stack_adjust (int n
)
2528 unsigned char buf
[16];
2530 CORE_ADDR buildaddr
= current_insn_ptr
;
2533 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2537 append_insns (&buildaddr
, i
, buf
);
2538 current_insn_ptr
= buildaddr
;
2541 /* FN's prototype is `LONGEST(*fn)(int)'. */
2544 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2546 unsigned char buf
[16];
2548 CORE_ADDR buildaddr
;
2550 EMIT_ASM32 (i386_int_call_1_a
,
2551 /* Reserve a bit of stack space. */
2553 /* Put the one argument on the stack. */
2554 buildaddr
= current_insn_ptr
;
2556 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2559 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2561 append_insns (&buildaddr
, i
, buf
);
2562 current_insn_ptr
= buildaddr
;
2563 i386_emit_call (fn
);
2564 EMIT_ASM32 (i386_int_call_1_c
,
2566 "lea 0x8(%esp),%esp");
2569 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2572 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2574 unsigned char buf
[16];
2576 CORE_ADDR buildaddr
;
2578 EMIT_ASM32 (i386_void_call_2_a
,
2579 /* Preserve %eax only; we don't have to worry about %ebx. */
2581 /* Reserve a bit of stack space for arguments. */
2582 "sub $0x10,%esp\n\t"
2583 /* Copy "top" to the second argument position. (Note that
2584 we can't assume function won't scribble on its
2585 arguments, so don't try to restore from this.) */
2586 "mov %eax,4(%esp)\n\t"
2587 "mov %ebx,8(%esp)");
2588 /* Put the first argument on the stack. */
2589 buildaddr
= current_insn_ptr
;
2591 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2594 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2596 append_insns (&buildaddr
, i
, buf
);
2597 current_insn_ptr
= buildaddr
;
2598 i386_emit_call (fn
);
2599 EMIT_ASM32 (i386_void_call_2_b
,
2600 "lea 0x10(%esp),%esp\n\t"
2601 /* Restore original stack top. */
2607 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2610 /* Check low half first, more likely to be decider */
2611 "cmpl %eax,(%esp)\n\t"
2612 "jne .Leq_fallthru\n\t"
2613 "cmpl %ebx,4(%esp)\n\t"
2614 "jne .Leq_fallthru\n\t"
2615 "lea 0x8(%esp),%esp\n\t"
2618 /* jmp, but don't trust the assembler to choose the right jump */
2619 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2620 ".Leq_fallthru:\n\t"
2621 "lea 0x8(%esp),%esp\n\t"
2632 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2635 /* Check low half first, more likely to be decider */
2636 "cmpl %eax,(%esp)\n\t"
2638 "cmpl %ebx,4(%esp)\n\t"
2639 "je .Lne_fallthru\n\t"
2641 "lea 0x8(%esp),%esp\n\t"
2644 /* jmp, but don't trust the assembler to choose the right jump */
2645 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2646 ".Lne_fallthru:\n\t"
2647 "lea 0x8(%esp),%esp\n\t"
2658 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2661 "cmpl %ebx,4(%esp)\n\t"
2663 "jne .Llt_fallthru\n\t"
2664 "cmpl %eax,(%esp)\n\t"
2665 "jnl .Llt_fallthru\n\t"
2667 "lea 0x8(%esp),%esp\n\t"
2670 /* jmp, but don't trust the assembler to choose the right jump */
2671 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2672 ".Llt_fallthru:\n\t"
2673 "lea 0x8(%esp),%esp\n\t"
2684 i386_emit_le_goto (int *offset_p
, int *size_p
)
2687 "cmpl %ebx,4(%esp)\n\t"
2689 "jne .Lle_fallthru\n\t"
2690 "cmpl %eax,(%esp)\n\t"
2691 "jnle .Lle_fallthru\n\t"
2693 "lea 0x8(%esp),%esp\n\t"
2696 /* jmp, but don't trust the assembler to choose the right jump */
2697 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2698 ".Lle_fallthru:\n\t"
2699 "lea 0x8(%esp),%esp\n\t"
2710 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2713 "cmpl %ebx,4(%esp)\n\t"
2715 "jne .Lgt_fallthru\n\t"
2716 "cmpl %eax,(%esp)\n\t"
2717 "jng .Lgt_fallthru\n\t"
2719 "lea 0x8(%esp),%esp\n\t"
2722 /* jmp, but don't trust the assembler to choose the right jump */
2723 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2724 ".Lgt_fallthru:\n\t"
2725 "lea 0x8(%esp),%esp\n\t"
2736 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2739 "cmpl %ebx,4(%esp)\n\t"
2741 "jne .Lge_fallthru\n\t"
2742 "cmpl %eax,(%esp)\n\t"
2743 "jnge .Lge_fallthru\n\t"
2745 "lea 0x8(%esp),%esp\n\t"
2748 /* jmp, but don't trust the assembler to choose the right jump */
2749 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2750 ".Lge_fallthru:\n\t"
2751 "lea 0x8(%esp),%esp\n\t"
2761 struct emit_ops i386_emit_ops
=
2769 i386_emit_rsh_signed
,
2770 i386_emit_rsh_unsigned
,
2778 i386_emit_less_signed
,
2779 i386_emit_less_unsigned
,
2783 i386_write_goto_address
,
2788 i386_emit_stack_flush
,
2791 i386_emit_stack_adjust
,
2792 i386_emit_int_call_1
,
2793 i386_emit_void_call_2
,
static struct emit_ops *
x86_emit_ops (void)
{
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  1, /* decr_pc_after_break */
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}