/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
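/* Each entry in these regmap tables is the byte offset of the corresponding
   GDB register within the kernel's `struct user' register area, so GDB's
   register 0 (eax in the 32-bit description) is transferred from offset
   RAX * 8 of the 64-bit layout above.  In the x86_64_regmap table that
   follows, -1 marks registers that are not part of the ptrace
   general-register set and are therefore not transferred through this
   path.  */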
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,			/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
#define REGSIZE 4

#endif
/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	}

      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
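/* Note that the PTRACE_GETREGSET / NT_X86_XSTATE entry above is declared
   with a size of 0: x86_linux_read_description below patches in the real
   size (X86_XSTATE_SIZE (xcr0)) once the kernel has reported which XSAVE
   features are enabled.  */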
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
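/* For illustration (a sketch, not code used by gdbserver itself): given a
   raw XSAVE buffer fetched with PTRACE_GETREGSET / NT_X86_XSTATE into a
   hypothetical gdb_byte array xsave_buf, the enabled-feature mask can be
   read straight out of the software-usable area, which is what
   x86_linux_read_description does further down:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);  */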
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */

  for_each_process ([] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    the_low_target.arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

static const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
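/* A typical use of push_opcode, as seen in the jump pad builders below, is

     i += push_opcode (&buf[i], "48 83 ec 18");	(sub $0x18,%rsp)

   i.e. it parses a string of hexadecimal byte values, appends those bytes
   to BUF, and returns how many bytes were emitted.  */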
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
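/* Roughly, the amd64 pad built below has this shape (a sketch of the code
   that follows, not an exact listing):

     push all general-purpose registers, the flags and the tracepoint PC
     reserve a collecting_t object on the stack and take its spin lock
     %rdi = tpoint, %rsi = base of the saved register block; call gdb_collect
     release the lock and pop the saved register state
     <relocated copy of the original instruction>
     jmp back to tpaddr + orig_size

   The 32-bit variant further down follows the same shape using pushad/popad
   and a cdecl-style call.  */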
static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;
  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;

  CORE_ADDR buildaddr = *jump_entry;
  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
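/* As an example of how the macro above is used: EMIT_ASM (amd64_add,
   "add (%rsp),%rax\n\t" "lea 0x8(%rsp),%rsp") in amd64_emit_add below
   assembles those two instructions into gdbserver's own text between the
   start_amd64_add and end_amd64_add labels, jumps over them so they are
   never executed in place, and lets add_insns copy the raw bytes into the
   compiled-bytecode buffer at current_insn_ptr.  */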
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "pop %ebx\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}