/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Mirror of this process's debug registers.  */
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
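/* Illustrative sketch (not part of this file): each entry in the
   regmap arrays above is the byte offset of a register inside the
   ptrace `struct user' register area, indexed by GDB register number.
   A hypothetical direct reader (helper name invented here) would look
   roughly like this:

     long
     peek_user_reg (pid_t pid, int gdb_regno)
     {
       long offset = i386_regmap[gdb_regno];   // byte offset in USER area
       errno = 0;
       return ptrace (PTRACE_PEEKUSER, pid, (void *) offset, 0);
     }

   gdbserver itself does not call ptrace here; the offsets are consumed
   by the generic usrregs machinery in linux-low.c through the
   usrregs_info structures defined near the end of this file.  */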
/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid, (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
394 /* ??? The non-biarch i386 case stores all the i387 regs twice.
395 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
396 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
397 doesn't work. IWBN to avoid the duplication in the case where it
398 does work. Maybe the arch_setup routine could check whether it works
399 and update the supported regsets accordingly. */
static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
#ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
#endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
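/* Illustrative sketch (not part of this file): for regset entries with
   a non-zero nt_type (NT_X86_XSTATE above), the regsets code in
   linux-low.c passes a struct iovec to PTRACE_GETREGSET instead of a
   plain buffer, roughly:

     struct iovec iov;
     uint64_t xstatebuf[X86_XSTATE_MAX_SIZE / sizeof (uint64_t)];

     iov.iov_base = xstatebuf;
     iov.iov_len = sizeof (xstatebuf);
     ptrace (PTRACE_GETREGSET, lwpid, (unsigned int) NT_X86_XSTATE, &iov);

   The same pattern appears below in x86_linux_read_description when
   probing whether PTRACE_GETREGSET is usable at all.  */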
424 x86_get_pc (struct regcache
*regcache
)
426 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
431 collect_register_by_name (regcache
, "rip", &pc
);
432 return (CORE_ADDR
) pc
;
437 collect_register_by_name (regcache
, "eip", &pc
);
438 return (CORE_ADDR
) pc
;
443 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
445 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
449 unsigned long newpc
= pc
;
450 supply_register_by_name (regcache
, "rip", &newpc
);
454 unsigned int newpc
= pc
;
455 supply_register_by_name (regcache
, "eip", &newpc
);
459 static const gdb_byte x86_breakpoint
[] = { 0xCC };
460 #define x86_breakpoint_len 1
463 x86_breakpoint_at (CORE_ADDR pc
)
467 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
485 /* Breakpoint/Watchpoint support. */
488 x86_supports_z_point_type (char z_type
)
494 case Z_PACKET_WRITE_WP
:
495 case Z_PACKET_ACCESS_WP
:
503 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
504 int size
, struct raw_breakpoint
*bp
)
506 struct process_info
*proc
= current_process ();
510 case raw_bkpt_type_hw
:
511 case raw_bkpt_type_write_wp
:
512 case raw_bkpt_type_access_wp
:
514 enum target_hw_bp_type hw_type
515 = raw_bkpt_type_to_target_hw_bp_type (type
);
516 struct x86_debug_reg_state
*state
517 = &proc
->priv
->arch_private
->debug_reg_state
;
519 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
529 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
530 int size
, struct raw_breakpoint
*bp
)
532 struct process_info
*proc
= current_process ();
536 case raw_bkpt_type_hw
:
537 case raw_bkpt_type_write_wp
:
538 case raw_bkpt_type_access_wp
:
540 enum target_hw_bp_type hw_type
541 = raw_bkpt_type_to_target_hw_bp_type (type
);
542 struct x86_debug_reg_state
*state
543 = &proc
->priv
->arch_private
->debug_reg_state
;
545 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
554 x86_stopped_by_watchpoint (void)
556 struct process_info
*proc
= current_process ();
557 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
561 x86_stopped_data_address (void)
563 struct process_info
*proc
= current_process ();
565 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
571 /* Called when a new process is created. */
573 static struct arch_process_info
*
574 x86_linux_new_process (void)
576 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
578 x86_low_init_dregs (&info
->debug_reg_state
);
583 /* Target routine for linux_new_fork. */
586 x86_linux_new_fork (struct process_info
*parent
, struct process_info
*child
)
588 /* These are allocated by linux_add_process. */
589 gdb_assert (parent
->priv
!= NULL
590 && parent
->priv
->arch_private
!= NULL
);
591 gdb_assert (child
->priv
!= NULL
592 && child
->priv
->arch_private
!= NULL
);
594 /* Linux kernel before 2.6.33 commit
595 72f674d203cd230426437cdcf7dd6f681dad8b0d
596 will inherit hardware debug registers from parent
597 on fork/vfork/clone. Newer Linux kernels create such tasks with
598 zeroed debug registers.
600 GDB core assumes the child inherits the watchpoints/hw
601 breakpoints of the parent, and will remove them all from the
602 forked off process. Copy the debug registers mirrors into the
603 new process so that all breakpoints and watchpoints can be
604 removed together. The debug registers mirror will become zeroed
605 in the end before detaching the forked off process, thus making
606 this compatible with older Linux kernels too. */
608 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
611 /* See nat/x86-dregs.h. */
613 struct x86_debug_reg_state
*
614 x86_debug_reg_state (pid_t pid
)
616 struct process_info
*proc
= find_process_pid (pid
);
618 return &proc
->priv
->arch_private
->debug_reg_state
;
621 /* When GDBSERVER is built as a 64-bit application on linux, the
622 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
623 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
624 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
625 conversion in-place ourselves. */
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */
634 x86_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
637 unsigned int machine
;
638 int tid
= lwpid_of (current_thread
);
639 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
641 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
642 if (!is_64bit_tdesc ())
643 return amd64_linux_siginfo_fixup_common (native
, inf
, direction
,
645 /* No fixup for native x32 GDB. */
646 else if (!is_elf64
&& sizeof (void *) == 8)
647 return amd64_linux_siginfo_fixup_common (native
, inf
, direction
,
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	}

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
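/* Illustrative sketch (not part of this file): once an XSAVE block has
   been fetched with PTRACE_GETREGSET/NT_X86_XSTATE, the OS-enabled
   feature mask can be read straight out of the software-usable area:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);
     if (xcr0 & X86_XSTATE_AVX)
       ;   // the OS saves AVX state for this thread

   x86_linux_read_description below does the equivalent to decide which
   target description (SSE, AVX, MPX, AVX512) to report to GDB.  */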
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
688 /* Get Linux/x86 target description from running target. */
690 static const struct target_desc
*
691 x86_linux_read_description (void)
693 unsigned int machine
;
697 static uint64_t xcr0
;
698 struct regset_info
*regset
;
700 tid
= lwpid_of (current_thread
);
702 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
704 if (sizeof (void *) == 4)
707 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
709 else if (machine
== EM_X86_64
)
710 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
714 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
715 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
717 elf_fpxregset_t fpxregs
;
719 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
721 have_ptrace_getfpxregs
= 0;
722 have_ptrace_getregset
= 0;
723 return tdesc_i386_mmx_linux
;
726 have_ptrace_getfpxregs
= 1;
732 x86_xcr0
= X86_XSTATE_SSE_MASK
;
736 if (machine
== EM_X86_64
)
737 return tdesc_amd64_linux_no_xml
;
740 return tdesc_i386_linux_no_xml
;
743 if (have_ptrace_getregset
== -1)
745 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
748 iov
.iov_base
= xstateregs
;
749 iov
.iov_len
= sizeof (xstateregs
);
751 /* Check if PTRACE_GETREGSET works. */
752 if (ptrace (PTRACE_GETREGSET
, tid
,
753 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
754 have_ptrace_getregset
= 0;
757 have_ptrace_getregset
= 1;
759 /* Get XCR0 from XSAVE extended state. */
760 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
761 / sizeof (uint64_t))];
763 /* Use PTRACE_GETREGSET if it is available. */
764 for (regset
= x86_regsets
;
765 regset
->fill_function
!= NULL
; regset
++)
766 if (regset
->get_request
== PTRACE_GETREGSET
)
767 regset
->size
= X86_XSTATE_SIZE (xcr0
);
768 else if (regset
->type
!= GENERAL_REGS
)
773 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
774 xcr0_features
= (have_ptrace_getregset
775 && (xcr0
& X86_XSTATE_ALL_MASK
));
780 if (machine
== EM_X86_64
)
787 switch (xcr0
& X86_XSTATE_ALL_MASK
)
789 case X86_XSTATE_AVX512_MASK
:
790 return tdesc_amd64_avx512_linux
;
792 case X86_XSTATE_MPX_MASK
:
793 return tdesc_amd64_mpx_linux
;
795 case X86_XSTATE_AVX_MASK
:
796 return tdesc_amd64_avx_linux
;
799 return tdesc_amd64_linux
;
803 return tdesc_amd64_linux
;
809 switch (xcr0
& X86_XSTATE_ALL_MASK
)
811 case X86_XSTATE_AVX512_MASK
:
812 return tdesc_x32_avx512_linux
;
814 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
815 case X86_XSTATE_AVX_MASK
:
816 return tdesc_x32_avx_linux
;
819 return tdesc_x32_linux
;
823 return tdesc_x32_linux
;
831 switch (xcr0
& X86_XSTATE_ALL_MASK
)
833 case (X86_XSTATE_AVX512_MASK
):
834 return tdesc_i386_avx512_linux
;
836 case (X86_XSTATE_MPX_MASK
):
837 return tdesc_i386_mpx_linux
;
839 case (X86_XSTATE_AVX_MASK
):
840 return tdesc_i386_avx_linux
;
843 return tdesc_i386_linux
;
847 return tdesc_i386_linux
;
850 gdb_assert_not_reached ("failed to return tdesc");
853 /* Callback for find_inferior. Stops iteration when a thread with a
854 given PID is found. */
857 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
859 int pid
= *(int *) data
;
861 return (ptid_get_pid (entry
->id
) == pid
);
864 /* Callback for for_each_inferior. Calls the arch_setup routine for
868 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
870 int pid
= ptid_get_pid (entry
->id
);
/* Look up any thread of this process.  */
874 = (struct thread_info
*) find_inferior (&all_threads
,
875 same_process_callback
, &pid
);
877 the_low_target
.arch_setup ();
880 /* Update all the target description of all processes; a new GDB
881 connected, and it may or not support xml target descriptions. */
884 x86_linux_update_xmltarget (void)
886 struct thread_info
*saved_thread
= current_thread
;
888 /* Before changing the register cache's internal layout, flush the
889 contents of the current valid caches back to the threads, and
890 release the current regcache objects. */
893 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
895 current_thread
= saved_thread
;
898 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
902 x86_linux_process_qsupported (char **features
, int count
)
906 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
907 with "i386" in qSupported query, it supports x86 XML target
910 for (i
= 0; i
< count
; i
++)
912 const char *feature
= features
[i
];
914 if (startswith (feature
, "xmlRegisters="))
916 char *copy
= xstrdup (feature
+ 13);
919 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
921 if (strcmp (p
, "i386") == 0)
931 x86_linux_update_xmltarget ();
934 /* Common for x86/x86-64. */
936 static struct regsets_info x86_regsets_info
=
938 x86_regsets
, /* regsets */
940 NULL
, /* disabled_regsets */
944 static struct regs_info amd64_linux_regs_info
=
946 NULL
, /* regset_bitmap */
947 NULL
, /* usrregs_info */
951 static struct usrregs_info i386_linux_usrregs_info
=
957 static struct regs_info i386_linux_regs_info
=
959 NULL
, /* regset_bitmap */
960 &i386_linux_usrregs_info
,
964 const struct regs_info
*
965 x86_linux_regs_info (void)
968 if (is_64bit_tdesc ())
969 return &amd64_linux_regs_info
;
972 return &i386_linux_regs_info
;
975 /* Initialize the target description for the architecture of the
979 x86_arch_setup (void)
981 current_process ()->tdesc
= x86_linux_read_description ();
984 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
985 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
988 x86_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
, int *sysret
)
990 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
997 collect_register_by_name (regcache
, "orig_rax", &l_sysno
);
998 collect_register_by_name (regcache
, "rax", &l_sysret
);
999 *sysno
= (int) l_sysno
;
1000 *sysret
= (int) l_sysret
;
1004 collect_register_by_name (regcache
, "orig_eax", sysno
);
1005 collect_register_by_name (regcache
, "eax", sysret
);
1010 x86_supports_tracepoints (void)
/* Append the LEN bytes at BUF to the inferior at *TO, and advance *TO
   past what was written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

/* Convert a string of hexadecimal byte values such as "48 89 e6" into
   raw bytes stored at BUF.  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
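/* Illustrative usage (not part of this file):

     unsigned char buf[16];
     int n = push_opcode (buf, "48 89 e6");   // mov %rsp,%rsi

   leaves n == 3 and buf[0..2] == { 0x48, 0x89, 0xe6 }.  The jump-pad
   builders below rely on it to keep the emitted opcode bytes
   readable.  */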
1044 /* Build a jump pad that saves registers and calls a collection
1045 function. Writes a jump instruction to the jump pad to
1046 JJUMPAD_INSN. The caller is responsible to write it in at the
1047 tracepoint address. */
1050 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1051 CORE_ADDR collector
,
1054 CORE_ADDR
*jump_entry
,
1055 CORE_ADDR
*trampoline
,
1056 ULONGEST
*trampoline_size
,
1057 unsigned char *jjump_pad_insn
,
1058 ULONGEST
*jjump_pad_insn_size
,
1059 CORE_ADDR
*adjusted_insn_addr
,
1060 CORE_ADDR
*adjusted_insn_addr_end
,
1063 unsigned char buf
[40];
1067 CORE_ADDR buildaddr
= *jump_entry
;
1069 /* Build the jump pad. */
1071 /* First, do tracepoint data collection. Save registers. */
1073 /* Need to ensure stack pointer saved first. */
1074 buf
[i
++] = 0x54; /* push %rsp */
1075 buf
[i
++] = 0x55; /* push %rbp */
1076 buf
[i
++] = 0x57; /* push %rdi */
1077 buf
[i
++] = 0x56; /* push %rsi */
1078 buf
[i
++] = 0x52; /* push %rdx */
1079 buf
[i
++] = 0x51; /* push %rcx */
1080 buf
[i
++] = 0x53; /* push %rbx */
1081 buf
[i
++] = 0x50; /* push %rax */
1082 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1083 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1084 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1085 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1086 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1087 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1088 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1089 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1090 buf
[i
++] = 0x9c; /* pushfq */
1091 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1093 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1094 i
+= sizeof (unsigned long);
1095 buf
[i
++] = 0x57; /* push %rdi */
1096 append_insns (&buildaddr
, i
, buf
);
1098 /* Stack space for the collecting_t object. */
1100 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1101 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1102 memcpy (buf
+ i
, &tpoint
, 8);
1104 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1105 i
+= push_opcode (&buf
[i
],
1106 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1107 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1108 append_insns (&buildaddr
, i
, buf
);
1112 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1113 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1115 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1116 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1117 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1118 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1119 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1120 append_insns (&buildaddr
, i
, buf
);
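/* Illustrative sketch (not part of this file): the instruction
   sequence emitted just above is a spin lock; in C it would read
   roughly (names here are descriptive only):

     while (compare_and_swap (lockaddr, 0, stack_collecting_t) != 0)
       ;   // spin until the in-process agent's collection lock is free

   i.e. the pad busy-waits until it has installed the collecting_t it
   just built on its stack as the lock owner, which is what the
   in-process agent expects before gdb_collect is called.  */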
1122 /* Set up the gdb_collect call. */
1123 /* At this point, (stack pointer + 0x18) is the base of our saved
1127 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1128 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1130 /* tpoint address may be 64-bit wide. */
1131 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1132 memcpy (buf
+ i
, &tpoint
, 8);
1134 append_insns (&buildaddr
, i
, buf
);
1136 /* The collector function being in the shared library, may be
1137 >31-bits away off the jump pad. */
1139 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1140 memcpy (buf
+ i
, &collector
, 8);
1142 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1143 append_insns (&buildaddr
, i
, buf
);
1145 /* Clear the spin-lock. */
1147 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1148 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1149 memcpy (buf
+ i
, &lockaddr
, 8);
1151 append_insns (&buildaddr
, i
, buf
);
1153 /* Remove stack that had been used for the collect_t object. */
1155 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1156 append_insns (&buildaddr
, i
, buf
);
1158 /* Restore register state. */
1160 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1164 buf
[i
++] = 0x9d; /* popfq */
1165 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1166 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1167 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1168 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1169 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1170 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1171 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1172 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1173 buf
[i
++] = 0x58; /* pop %rax */
1174 buf
[i
++] = 0x5b; /* pop %rbx */
1175 buf
[i
++] = 0x59; /* pop %rcx */
1176 buf
[i
++] = 0x5a; /* pop %rdx */
1177 buf
[i
++] = 0x5e; /* pop %rsi */
1178 buf
[i
++] = 0x5f; /* pop %rdi */
1179 buf
[i
++] = 0x5d; /* pop %rbp */
1180 buf
[i
++] = 0x5c; /* pop %rsp */
1181 append_insns (&buildaddr
, i
, buf
);
1183 /* Now, adjust the original instruction to execute in the jump
1185 *adjusted_insn_addr
= buildaddr
;
1186 relocate_instruction (&buildaddr
, tpaddr
);
1187 *adjusted_insn_addr_end
= buildaddr
;
1189 /* Finally, write a jump back to the program. */
1191 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1192 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1195 "E.Jump back from jump pad too far from tracepoint "
1196 "(offset 0x%" PRIx64
" > int32).", loffset
);
1200 offset
= (int) loffset
;
1201 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1202 memcpy (buf
+ 1, &offset
, 4);
1203 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1205 /* The jump pad is now built. Wire in a jump to our jump pad. This
1206 is always done last (by our caller actually), so that we can
1207 install fast tracepoints with threads running. This relies on
1208 the agent's atomic write support. */
1209 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1210 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1213 "E.Jump pad too far from tracepoint "
1214 "(offset 0x%" PRIx64
" > int32).", loffset
);
1218 offset
= (int) loffset
;
1220 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1221 memcpy (buf
+ 1, &offset
, 4);
1222 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1223 *jjump_pad_insn_size
= sizeof (jump_insn
);
1225 /* Return the end address of our pad. */
1226 *jump_entry
= buildaddr
;
1231 #endif /* __x86_64__ */
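/* For reference, the amd64 jump pad built above has this overall shape
   (informal summary of the code above, not a verbatim listing):

     push all GPRs, eflags and tpaddr	; register save area
     sub $0x18,%rsp			; room for a collecting_t
     ... store tpoint and thread area into the collecting_t ...
     lock cmpxchg spin loop		; take the IPA collecting lock
     callq *%rax			; gdb_collect (tpoint, regs)
     ... clear the lock, drop the collecting_t ...
     pop the saved registers
     <relocated original instruction>
     jmp back to tpaddr + orig_size  */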
1233 /* Build a jump pad that saves registers and calls a collection
1234 function. Writes a jump instruction to the jump pad to
1235 JJUMPAD_INSN. The caller is responsible to write it in at the
1236 tracepoint address. */
1239 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1240 CORE_ADDR collector
,
1243 CORE_ADDR
*jump_entry
,
1244 CORE_ADDR
*trampoline
,
1245 ULONGEST
*trampoline_size
,
1246 unsigned char *jjump_pad_insn
,
1247 ULONGEST
*jjump_pad_insn_size
,
1248 CORE_ADDR
*adjusted_insn_addr
,
1249 CORE_ADDR
*adjusted_insn_addr_end
,
1252 unsigned char buf
[0x100];
1254 CORE_ADDR buildaddr
= *jump_entry
;
1256 /* Build the jump pad. */
1258 /* First, do tracepoint data collection. Save registers. */
1260 buf
[i
++] = 0x60; /* pushad */
1261 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1262 *((int *)(buf
+ i
)) = (int) tpaddr
;
1264 buf
[i
++] = 0x9c; /* pushf */
1265 buf
[i
++] = 0x1e; /* push %ds */
1266 buf
[i
++] = 0x06; /* push %es */
1267 buf
[i
++] = 0x0f; /* push %fs */
1269 buf
[i
++] = 0x0f; /* push %gs */
1271 buf
[i
++] = 0x16; /* push %ss */
1272 buf
[i
++] = 0x0e; /* push %cs */
1273 append_insns (&buildaddr
, i
, buf
);
1275 /* Stack space for the collecting_t object. */
1277 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1279 /* Build the object. */
1280 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1281 memcpy (buf
+ i
, &tpoint
, 4);
1283 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1285 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1286 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1287 append_insns (&buildaddr
, i
, buf
);
1289 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1290 If we cared for it, this could be using xchg alternatively. */
1293 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1294 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1296 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1298 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1299 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1300 append_insns (&buildaddr
, i
, buf
);
1303 /* Set up arguments to the gdb_collect call. */
1305 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1306 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1307 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1308 append_insns (&buildaddr
, i
, buf
);
1311 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1312 append_insns (&buildaddr
, i
, buf
);
1315 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1316 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1318 append_insns (&buildaddr
, i
, buf
);
1320 buf
[0] = 0xe8; /* call <reladdr> */
1321 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1322 memcpy (buf
+ 1, &offset
, 4);
1323 append_insns (&buildaddr
, 5, buf
);
1324 /* Clean up after the call. */
1325 buf
[0] = 0x83; /* add $0x8,%esp */
1328 append_insns (&buildaddr
, 3, buf
);
1331 /* Clear the spin-lock. This would need the LOCK prefix on older
1334 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1335 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1336 memcpy (buf
+ i
, &lockaddr
, 4);
1338 append_insns (&buildaddr
, i
, buf
);
1341 /* Remove stack that had been used for the collect_t object. */
1343 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1344 append_insns (&buildaddr
, i
, buf
);
1347 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1350 buf
[i
++] = 0x17; /* pop %ss */
1351 buf
[i
++] = 0x0f; /* pop %gs */
1353 buf
[i
++] = 0x0f; /* pop %fs */
1355 buf
[i
++] = 0x07; /* pop %es */
1356 buf
[i
++] = 0x1f; /* pop %ds */
1357 buf
[i
++] = 0x9d; /* popf */
1358 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1361 buf
[i
++] = 0x61; /* popad */
1362 append_insns (&buildaddr
, i
, buf
);
1364 /* Now, adjust the original instruction to execute in the jump
1366 *adjusted_insn_addr
= buildaddr
;
1367 relocate_instruction (&buildaddr
, tpaddr
);
1368 *adjusted_insn_addr_end
= buildaddr
;
1370 /* Write the jump back to the program. */
1371 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1372 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1373 memcpy (buf
+ 1, &offset
, 4);
1374 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1376 /* The jump pad is now built. Wire in a jump to our jump pad. This
1377 is always done last (by our caller actually), so that we can
1378 install fast tracepoints with threads running. This relies on
1379 the agent's atomic write support. */
1382 /* Create a trampoline. */
1383 *trampoline_size
= sizeof (jump_insn
);
1384 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1386 /* No trampoline space available. */
1388 "E.Cannot allocate trampoline space needed for fast "
1389 "tracepoints on 4-byte instructions.");
1393 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1394 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1395 memcpy (buf
+ 1, &offset
, 4);
1396 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1398 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1399 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1400 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1401 memcpy (buf
+ 2, &offset
, 2);
1402 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1403 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1407 /* Else use a 32-bit relative jump instruction. */
1408 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1409 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1410 memcpy (buf
+ 1, &offset
, 4);
1411 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1412 *jjump_pad_insn_size
= sizeof (jump_insn
);
1415 /* Return the end address of our pad. */
1416 *jump_entry
= buildaddr
;
1422 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1423 CORE_ADDR collector
,
1426 CORE_ADDR
*jump_entry
,
1427 CORE_ADDR
*trampoline
,
1428 ULONGEST
*trampoline_size
,
1429 unsigned char *jjump_pad_insn
,
1430 ULONGEST
*jjump_pad_insn_size
,
1431 CORE_ADDR
*adjusted_insn_addr
,
1432 CORE_ADDR
*adjusted_insn_addr_end
,
1436 if (is_64bit_tdesc ())
1437 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1438 collector
, lockaddr
,
1439 orig_size
, jump_entry
,
1440 trampoline
, trampoline_size
,
1442 jjump_pad_insn_size
,
1444 adjusted_insn_addr_end
,
1448 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1449 collector
, lockaddr
,
1450 orig_size
, jump_entry
,
1451 trampoline
, trampoline_size
,
1453 jjump_pad_insn_size
,
1455 adjusted_insn_addr_end
,
1459 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1463 x86_get_min_fast_tracepoint_insn_len (void)
1465 static int warned_about_fast_tracepoints
= 0;
1468 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1469 used for fast tracepoints. */
1470 if (is_64bit_tdesc ())
1474 if (agent_loaded_p ())
1476 char errbuf
[IPA_BUFSIZ
];
1480 /* On x86, if trampolines are available, then 4-byte jump instructions
1481 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1482 with a 4-byte offset are used instead. */
1483 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1487 /* GDB has no channel to explain to user why a shorter fast
1488 tracepoint is not possible, but at least make GDBserver
1489 mention that something has gone awry. */
1490 if (!warned_about_fast_tracepoints
)
1492 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1493 warned_about_fast_tracepoints
= 1;
1500 /* Indicate that the minimum length is currently unknown since the IPA
1501 has not loaded yet. */
1507 add_insns (unsigned char *start
, int len
)
1509 CORE_ADDR buildaddr
= current_insn_ptr
;
1512 debug_printf ("Adding %d bytes of insn at %s\n",
1513 len
, paddress (buildaddr
));
1515 append_insns (&buildaddr
, len
, start
);
1516 current_insn_ptr
= buildaddr
;
1519 /* Our general strategy for emitting code is to avoid specifying raw
1520 bytes whenever possible, and instead copy a block of inline asm
1521 that is embedded in the function. This is a little messy, because
1522 we need to keep the compiler from discarding what looks like dead
1523 code, plus suppress various warnings. */
1525 #define EMIT_ASM(NAME, INSNS) \
1528 extern unsigned char start_ ## NAME, end_ ## NAME; \
1529 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1530 __asm__ ("jmp end_" #NAME "\n" \
1531 "\t" "start_" #NAME ":" \
1533 "\t" "end_" #NAME ":"); \
1538 #define EMIT_ASM32(NAME,INSNS) \
1541 extern unsigned char start_ ## NAME, end_ ## NAME; \
1542 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1543 __asm__ (".code32\n" \
1544 "\t" "jmp end_" #NAME "\n" \
1545 "\t" "start_" #NAME ":\n" \
1547 "\t" "end_" #NAME ":\n" \
1553 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
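/* Illustrative sketch (not part of this file): a use such as

     EMIT_ASM (amd64_add,
	       "add (%rsp),%rax\n\t"
	       "lea 0x8(%rsp),%rsp");

   assembles those instructions into gdbserver's own text between the
   start_amd64_add and end_amd64_add labels, jumps over them so they
   never execute locally, and then add_insns copies the bytes between
   the two labels into the inferior at current_insn_ptr.  */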
1560 amd64_emit_prologue (void)
1562 EMIT_ASM (amd64_prologue
,
1564 "movq %rsp,%rbp\n\t"
1565 "sub $0x20,%rsp\n\t"
1566 "movq %rdi,-8(%rbp)\n\t"
1567 "movq %rsi,-16(%rbp)");
1572 amd64_emit_epilogue (void)
1574 EMIT_ASM (amd64_epilogue
,
1575 "movq -16(%rbp),%rdi\n\t"
1576 "movq %rax,(%rdi)\n\t"
1583 amd64_emit_add (void)
1585 EMIT_ASM (amd64_add
,
1586 "add (%rsp),%rax\n\t"
1587 "lea 0x8(%rsp),%rsp");
1591 amd64_emit_sub (void)
1593 EMIT_ASM (amd64_sub
,
1594 "sub %rax,(%rsp)\n\t"
1599 amd64_emit_mul (void)
1605 amd64_emit_lsh (void)
1611 amd64_emit_rsh_signed (void)
1617 amd64_emit_rsh_unsigned (void)
1623 amd64_emit_ext (int arg
)
1628 EMIT_ASM (amd64_ext_8
,
1634 EMIT_ASM (amd64_ext_16
,
1639 EMIT_ASM (amd64_ext_32
,
1648 amd64_emit_log_not (void)
1650 EMIT_ASM (amd64_log_not
,
1651 "test %rax,%rax\n\t"
1657 amd64_emit_bit_and (void)
1659 EMIT_ASM (amd64_and
,
1660 "and (%rsp),%rax\n\t"
1661 "lea 0x8(%rsp),%rsp");
1665 amd64_emit_bit_or (void)
1668 "or (%rsp),%rax\n\t"
1669 "lea 0x8(%rsp),%rsp");
1673 amd64_emit_bit_xor (void)
1675 EMIT_ASM (amd64_xor
,
1676 "xor (%rsp),%rax\n\t"
1677 "lea 0x8(%rsp),%rsp");
1681 amd64_emit_bit_not (void)
1683 EMIT_ASM (amd64_bit_not
,
1684 "xorq $0xffffffffffffffff,%rax");
1688 amd64_emit_equal (void)
1690 EMIT_ASM (amd64_equal
,
1691 "cmp %rax,(%rsp)\n\t"
1692 "je .Lamd64_equal_true\n\t"
1694 "jmp .Lamd64_equal_end\n\t"
1695 ".Lamd64_equal_true:\n\t"
1697 ".Lamd64_equal_end:\n\t"
1698 "lea 0x8(%rsp),%rsp");
1702 amd64_emit_less_signed (void)
1704 EMIT_ASM (amd64_less_signed
,
1705 "cmp %rax,(%rsp)\n\t"
1706 "jl .Lamd64_less_signed_true\n\t"
1708 "jmp .Lamd64_less_signed_end\n\t"
1709 ".Lamd64_less_signed_true:\n\t"
1711 ".Lamd64_less_signed_end:\n\t"
1712 "lea 0x8(%rsp),%rsp");
1716 amd64_emit_less_unsigned (void)
1718 EMIT_ASM (amd64_less_unsigned
,
1719 "cmp %rax,(%rsp)\n\t"
1720 "jb .Lamd64_less_unsigned_true\n\t"
1722 "jmp .Lamd64_less_unsigned_end\n\t"
1723 ".Lamd64_less_unsigned_true:\n\t"
1725 ".Lamd64_less_unsigned_end:\n\t"
1726 "lea 0x8(%rsp),%rsp");
1730 amd64_emit_ref (int size
)
1735 EMIT_ASM (amd64_ref1
,
1739 EMIT_ASM (amd64_ref2
,
1743 EMIT_ASM (amd64_ref4
,
1744 "movl (%rax),%eax");
1747 EMIT_ASM (amd64_ref8
,
1748 "movq (%rax),%rax");
1754 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1756 EMIT_ASM (amd64_if_goto
,
1760 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1768 amd64_emit_goto (int *offset_p
, int *size_p
)
1770 EMIT_ASM (amd64_goto
,
1771 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1779 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1781 int diff
= (to
- (from
+ size
));
1782 unsigned char buf
[sizeof (int)];
1790 memcpy (buf
, &diff
, sizeof (int));
1791 write_inferior_memory (from
, buf
, sizeof (int));
1795 amd64_emit_const (LONGEST num
)
1797 unsigned char buf
[16];
1799 CORE_ADDR buildaddr
= current_insn_ptr
;
1802 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1803 memcpy (&buf
[i
], &num
, sizeof (num
));
1805 append_insns (&buildaddr
, i
, buf
);
1806 current_insn_ptr
= buildaddr
;
1810 amd64_emit_call (CORE_ADDR fn
)
1812 unsigned char buf
[16];
1814 CORE_ADDR buildaddr
;
1817 /* The destination function being in the shared library, may be
1818 >31-bits away off the compiled code pad. */
1820 buildaddr
= current_insn_ptr
;
1822 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1826 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1828 /* Offset is too large for a call. Use callq, but that requires
1829 a register, so avoid it if possible. Use r10, since it is
1830 call-clobbered, we don't have to push/pop it. */
1831 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1833 memcpy (buf
+ i
, &fn
, 8);
1835 buf
[i
++] = 0xff; /* callq *%r10 */
1840 int offset32
= offset64
; /* we know we can't overflow here. */
1841 memcpy (buf
+ i
, &offset32
, 4);
1845 append_insns (&buildaddr
, i
, buf
);
1846 current_insn_ptr
= buildaddr
;
1850 amd64_emit_reg (int reg
)
1852 unsigned char buf
[16];
1854 CORE_ADDR buildaddr
;
1856 /* Assume raw_regs is still in %rdi. */
1857 buildaddr
= current_insn_ptr
;
1859 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1860 memcpy (&buf
[i
], ®
, sizeof (reg
));
1862 append_insns (&buildaddr
, i
, buf
);
1863 current_insn_ptr
= buildaddr
;
1864 amd64_emit_call (get_raw_reg_func_addr ());
1868 amd64_emit_pop (void)
1870 EMIT_ASM (amd64_pop
,
1875 amd64_emit_stack_flush (void)
1877 EMIT_ASM (amd64_stack_flush
,
1882 amd64_emit_zero_ext (int arg
)
1887 EMIT_ASM (amd64_zero_ext_8
,
1891 EMIT_ASM (amd64_zero_ext_16
,
1892 "and $0xffff,%rax");
1895 EMIT_ASM (amd64_zero_ext_32
,
1896 "mov $0xffffffff,%rcx\n\t"
1905 amd64_emit_swap (void)
1907 EMIT_ASM (amd64_swap
,
1914 amd64_emit_stack_adjust (int n
)
1916 unsigned char buf
[16];
1918 CORE_ADDR buildaddr
= current_insn_ptr
;
1921 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1925 /* This only handles adjustments up to 16, but we don't expect any more. */
1927 append_insns (&buildaddr
, i
, buf
);
1928 current_insn_ptr
= buildaddr
;
1931 /* FN's prototype is `LONGEST(*fn)(int)'. */
1934 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1936 unsigned char buf
[16];
1938 CORE_ADDR buildaddr
;
1940 buildaddr
= current_insn_ptr
;
1942 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1943 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1945 append_insns (&buildaddr
, i
, buf
);
1946 current_insn_ptr
= buildaddr
;
1947 amd64_emit_call (fn
);
1950 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1953 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
1955 unsigned char buf
[16];
1957 CORE_ADDR buildaddr
;
1959 buildaddr
= current_insn_ptr
;
1961 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1962 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1964 append_insns (&buildaddr
, i
, buf
);
1965 current_insn_ptr
= buildaddr
;
1966 EMIT_ASM (amd64_void_call_2_a
,
1967 /* Save away a copy of the stack top. */
1969 /* Also pass top as the second argument. */
1971 amd64_emit_call (fn
);
1972 EMIT_ASM (amd64_void_call_2_b
,
1973 /* Restore the stack top, %rax may have been trashed. */
1978 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
1981 "cmp %rax,(%rsp)\n\t"
1982 "jne .Lamd64_eq_fallthru\n\t"
1983 "lea 0x8(%rsp),%rsp\n\t"
1985 /* jmp, but don't trust the assembler to choose the right jump */
1986 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1987 ".Lamd64_eq_fallthru:\n\t"
1988 "lea 0x8(%rsp),%rsp\n\t"
1998 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2001 "cmp %rax,(%rsp)\n\t"
2002 "je .Lamd64_ne_fallthru\n\t"
2003 "lea 0x8(%rsp),%rsp\n\t"
2005 /* jmp, but don't trust the assembler to choose the right jump */
2006 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2007 ".Lamd64_ne_fallthru:\n\t"
2008 "lea 0x8(%rsp),%rsp\n\t"
2018 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2021 "cmp %rax,(%rsp)\n\t"
2022 "jnl .Lamd64_lt_fallthru\n\t"
2023 "lea 0x8(%rsp),%rsp\n\t"
2025 /* jmp, but don't trust the assembler to choose the right jump */
2026 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2027 ".Lamd64_lt_fallthru:\n\t"
2028 "lea 0x8(%rsp),%rsp\n\t"
2038 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2041 "cmp %rax,(%rsp)\n\t"
2042 "jnle .Lamd64_le_fallthru\n\t"
2043 "lea 0x8(%rsp),%rsp\n\t"
2045 /* jmp, but don't trust the assembler to choose the right jump */
2046 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2047 ".Lamd64_le_fallthru:\n\t"
2048 "lea 0x8(%rsp),%rsp\n\t"
2058 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2061 "cmp %rax,(%rsp)\n\t"
2062 "jng .Lamd64_gt_fallthru\n\t"
2063 "lea 0x8(%rsp),%rsp\n\t"
2065 /* jmp, but don't trust the assembler to choose the right jump */
2066 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2067 ".Lamd64_gt_fallthru:\n\t"
2068 "lea 0x8(%rsp),%rsp\n\t"
2078 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2081 "cmp %rax,(%rsp)\n\t"
2082 "jnge .Lamd64_ge_fallthru\n\t"
2083 ".Lamd64_ge_jump:\n\t"
2084 "lea 0x8(%rsp),%rsp\n\t"
2086 /* jmp, but don't trust the assembler to choose the right jump */
2087 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2088 ".Lamd64_ge_fallthru:\n\t"
2089 "lea 0x8(%rsp),%rsp\n\t"
2098 struct emit_ops amd64_emit_ops
=
2100 amd64_emit_prologue
,
2101 amd64_emit_epilogue
,
2106 amd64_emit_rsh_signed
,
2107 amd64_emit_rsh_unsigned
,
2115 amd64_emit_less_signed
,
2116 amd64_emit_less_unsigned
,
2120 amd64_write_goto_address
,
2125 amd64_emit_stack_flush
,
2126 amd64_emit_zero_ext
,
2128 amd64_emit_stack_adjust
,
2129 amd64_emit_int_call_1
,
2130 amd64_emit_void_call_2
,
2139 #endif /* __x86_64__ */
2142 i386_emit_prologue (void)
2144 EMIT_ASM32 (i386_prologue
,
2148 /* At this point, the raw regs base address is at 8(%ebp), and the
2149 value pointer is at 12(%ebp). */
2153 i386_emit_epilogue (void)
2155 EMIT_ASM32 (i386_epilogue
,
2156 "mov 12(%ebp),%ecx\n\t"
2157 "mov %eax,(%ecx)\n\t"
2158 "mov %ebx,0x4(%ecx)\n\t"
2166 i386_emit_add (void)
2168 EMIT_ASM32 (i386_add
,
2169 "add (%esp),%eax\n\t"
2170 "adc 0x4(%esp),%ebx\n\t"
2171 "lea 0x8(%esp),%esp");
2175 i386_emit_sub (void)
2177 EMIT_ASM32 (i386_sub
,
2178 "subl %eax,(%esp)\n\t"
2179 "sbbl %ebx,4(%esp)\n\t"
2185 i386_emit_mul (void)
2191 i386_emit_lsh (void)
2197 i386_emit_rsh_signed (void)
2203 i386_emit_rsh_unsigned (void)
2209 i386_emit_ext (int arg
)
2214 EMIT_ASM32 (i386_ext_8
,
2217 "movl %eax,%ebx\n\t"
2221 EMIT_ASM32 (i386_ext_16
,
2223 "movl %eax,%ebx\n\t"
2227 EMIT_ASM32 (i386_ext_32
,
2228 "movl %eax,%ebx\n\t"
2237 i386_emit_log_not (void)
2239 EMIT_ASM32 (i386_log_not
,
2241 "test %eax,%eax\n\t"
2248 i386_emit_bit_and (void)
2250 EMIT_ASM32 (i386_and
,
2251 "and (%esp),%eax\n\t"
2252 "and 0x4(%esp),%ebx\n\t"
2253 "lea 0x8(%esp),%esp");
2257 i386_emit_bit_or (void)
2259 EMIT_ASM32 (i386_or
,
2260 "or (%esp),%eax\n\t"
2261 "or 0x4(%esp),%ebx\n\t"
2262 "lea 0x8(%esp),%esp");
2266 i386_emit_bit_xor (void)
2268 EMIT_ASM32 (i386_xor
,
2269 "xor (%esp),%eax\n\t"
2270 "xor 0x4(%esp),%ebx\n\t"
2271 "lea 0x8(%esp),%esp");
2275 i386_emit_bit_not (void)
2277 EMIT_ASM32 (i386_bit_not
,
2278 "xor $0xffffffff,%eax\n\t"
2279 "xor $0xffffffff,%ebx\n\t");
2283 i386_emit_equal (void)
2285 EMIT_ASM32 (i386_equal
,
2286 "cmpl %ebx,4(%esp)\n\t"
2287 "jne .Li386_equal_false\n\t"
2288 "cmpl %eax,(%esp)\n\t"
2289 "je .Li386_equal_true\n\t"
2290 ".Li386_equal_false:\n\t"
2292 "jmp .Li386_equal_end\n\t"
2293 ".Li386_equal_true:\n\t"
2295 ".Li386_equal_end:\n\t"
2297 "lea 0x8(%esp),%esp");
2301 i386_emit_less_signed (void)
2303 EMIT_ASM32 (i386_less_signed
,
2304 "cmpl %ebx,4(%esp)\n\t"
2305 "jl .Li386_less_signed_true\n\t"
2306 "jne .Li386_less_signed_false\n\t"
2307 "cmpl %eax,(%esp)\n\t"
2308 "jl .Li386_less_signed_true\n\t"
2309 ".Li386_less_signed_false:\n\t"
2311 "jmp .Li386_less_signed_end\n\t"
2312 ".Li386_less_signed_true:\n\t"
2314 ".Li386_less_signed_end:\n\t"
2316 "lea 0x8(%esp),%esp");
2320 i386_emit_less_unsigned (void)
2322 EMIT_ASM32 (i386_less_unsigned
,
2323 "cmpl %ebx,4(%esp)\n\t"
2324 "jb .Li386_less_unsigned_true\n\t"
2325 "jne .Li386_less_unsigned_false\n\t"
2326 "cmpl %eax,(%esp)\n\t"
2327 "jb .Li386_less_unsigned_true\n\t"
2328 ".Li386_less_unsigned_false:\n\t"
2330 "jmp .Li386_less_unsigned_end\n\t"
2331 ".Li386_less_unsigned_true:\n\t"
2333 ".Li386_less_unsigned_end:\n\t"
2335 "lea 0x8(%esp),%esp");
2339 i386_emit_ref (int size
)
2344 EMIT_ASM32 (i386_ref1
,
2348 EMIT_ASM32 (i386_ref2
,
2352 EMIT_ASM32 (i386_ref4
,
2353 "movl (%eax),%eax");
2356 EMIT_ASM32 (i386_ref8
,
2357 "movl 4(%eax),%ebx\n\t"
2358 "movl (%eax),%eax");
2364 i386_emit_if_goto (int *offset_p
, int *size_p
)
2366 EMIT_ASM32 (i386_if_goto
,
2372 /* Don't trust the assembler to choose the right jump */
2373 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2376 *offset_p
= 11; /* be sure that this matches the sequence above */
2382 i386_emit_goto (int *offset_p
, int *size_p
)
2384 EMIT_ASM32 (i386_goto
,
2385 /* Don't trust the assembler to choose the right jump */
2386 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2394 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2396 int diff
= (to
- (from
+ size
));
2397 unsigned char buf
[sizeof (int)];
2399 /* We're only doing 4-byte sizes at the moment. */
2406 memcpy (buf
, &diff
, sizeof (int));
2407 write_inferior_memory (from
, buf
, sizeof (int));
2411 i386_emit_const (LONGEST num
)
2413 unsigned char buf
[16];
2415 CORE_ADDR buildaddr
= current_insn_ptr
;
2418 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2419 lo
= num
& 0xffffffff;
2420 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2422 hi
= ((num
>> 32) & 0xffffffff);
2425 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2426 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2431 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2433 append_insns (&buildaddr
, i
, buf
);
2434 current_insn_ptr
= buildaddr
;
2438 i386_emit_call (CORE_ADDR fn
)
2440 unsigned char buf
[16];
2442 CORE_ADDR buildaddr
;
2444 buildaddr
= current_insn_ptr
;
2446 buf
[i
++] = 0xe8; /* call <reladdr> */
2447 offset
= ((int) fn
) - (buildaddr
+ 5);
2448 memcpy (buf
+ 1, &offset
, 4);
2449 append_insns (&buildaddr
, 5, buf
);
2450 current_insn_ptr
= buildaddr
;
2454 i386_emit_reg (int reg
)
2456 unsigned char buf
[16];
2458 CORE_ADDR buildaddr
;
2460 EMIT_ASM32 (i386_reg_a
,
2462 buildaddr
= current_insn_ptr
;
2464 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2465 memcpy (&buf
[i
], ®
, sizeof (reg
));
2467 append_insns (&buildaddr
, i
, buf
);
2468 current_insn_ptr
= buildaddr
;
2469 EMIT_ASM32 (i386_reg_b
,
2470 "mov %eax,4(%esp)\n\t"
2471 "mov 8(%ebp),%eax\n\t"
2473 i386_emit_call (get_raw_reg_func_addr ());
2474 EMIT_ASM32 (i386_reg_c
,
2476 "lea 0x8(%esp),%esp");
2480 i386_emit_pop (void)
2482 EMIT_ASM32 (i386_pop
,
2488 i386_emit_stack_flush (void)
2490 EMIT_ASM32 (i386_stack_flush
,
2496 i386_emit_zero_ext (int arg
)
2501 EMIT_ASM32 (i386_zero_ext_8
,
2502 "and $0xff,%eax\n\t"
2506 EMIT_ASM32 (i386_zero_ext_16
,
2507 "and $0xffff,%eax\n\t"
2511 EMIT_ASM32 (i386_zero_ext_32
,
2520 i386_emit_swap (void)
2522 EMIT_ASM32 (i386_swap
,
2532 i386_emit_stack_adjust (int n
)
2534 unsigned char buf
[16];
2536 CORE_ADDR buildaddr
= current_insn_ptr
;
2539 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2543 append_insns (&buildaddr
, i
, buf
);
2544 current_insn_ptr
= buildaddr
;
2547 /* FN's prototype is `LONGEST(*fn)(int)'. */
2550 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2552 unsigned char buf
[16];
2554 CORE_ADDR buildaddr
;
2556 EMIT_ASM32 (i386_int_call_1_a
,
2557 /* Reserve a bit of stack space. */
2559 /* Put the one argument on the stack. */
2560 buildaddr
= current_insn_ptr
;
2562 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2565 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2567 append_insns (&buildaddr
, i
, buf
);
2568 current_insn_ptr
= buildaddr
;
2569 i386_emit_call (fn
);
2570 EMIT_ASM32 (i386_int_call_1_c
,
2572 "lea 0x8(%esp),%esp");
2575 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2578 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2580 unsigned char buf
[16];
2582 CORE_ADDR buildaddr
;
2584 EMIT_ASM32 (i386_void_call_2_a
,
2585 /* Preserve %eax only; we don't have to worry about %ebx. */
2587 /* Reserve a bit of stack space for arguments. */
2588 "sub $0x10,%esp\n\t"
2589 /* Copy "top" to the second argument position. (Note that
2590 we can't assume function won't scribble on its
2591 arguments, so don't try to restore from this.) */
2592 "mov %eax,4(%esp)\n\t"
2593 "mov %ebx,8(%esp)");
2594 /* Put the first argument on the stack. */
2595 buildaddr
= current_insn_ptr
;
2597 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2600 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2602 append_insns (&buildaddr
, i
, buf
);
2603 current_insn_ptr
= buildaddr
;
2604 i386_emit_call (fn
);
2605 EMIT_ASM32 (i386_void_call_2_b
,
2606 "lea 0x10(%esp),%esp\n\t"
2607 /* Restore original stack top. */
2613 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2616 /* Check low half first, more likely to be decider */
2617 "cmpl %eax,(%esp)\n\t"
2618 "jne .Leq_fallthru\n\t"
2619 "cmpl %ebx,4(%esp)\n\t"
2620 "jne .Leq_fallthru\n\t"
2621 "lea 0x8(%esp),%esp\n\t"
2624 /* jmp, but don't trust the assembler to choose the right jump */
2625 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2626 ".Leq_fallthru:\n\t"
2627 "lea 0x8(%esp),%esp\n\t"
2638 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2641 /* Check low half first, more likely to be decider */
2642 "cmpl %eax,(%esp)\n\t"
2644 "cmpl %ebx,4(%esp)\n\t"
2645 "je .Lne_fallthru\n\t"
2647 "lea 0x8(%esp),%esp\n\t"
2650 /* jmp, but don't trust the assembler to choose the right jump */
2651 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2652 ".Lne_fallthru:\n\t"
2653 "lea 0x8(%esp),%esp\n\t"
2664 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2667 "cmpl %ebx,4(%esp)\n\t"
2669 "jne .Llt_fallthru\n\t"
2670 "cmpl %eax,(%esp)\n\t"
2671 "jnl .Llt_fallthru\n\t"
2673 "lea 0x8(%esp),%esp\n\t"
2676 /* jmp, but don't trust the assembler to choose the right jump */
2677 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2678 ".Llt_fallthru:\n\t"
2679 "lea 0x8(%esp),%esp\n\t"
2690 i386_emit_le_goto (int *offset_p
, int *size_p
)
2693 "cmpl %ebx,4(%esp)\n\t"
2695 "jne .Lle_fallthru\n\t"
2696 "cmpl %eax,(%esp)\n\t"
2697 "jnle .Lle_fallthru\n\t"
2699 "lea 0x8(%esp),%esp\n\t"
2702 /* jmp, but don't trust the assembler to choose the right jump */
2703 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2704 ".Lle_fallthru:\n\t"
2705 "lea 0x8(%esp),%esp\n\t"
2716 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2719 "cmpl %ebx,4(%esp)\n\t"
2721 "jne .Lgt_fallthru\n\t"
2722 "cmpl %eax,(%esp)\n\t"
2723 "jng .Lgt_fallthru\n\t"
2725 "lea 0x8(%esp),%esp\n\t"
2728 /* jmp, but don't trust the assembler to choose the right jump */
2729 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2730 ".Lgt_fallthru:\n\t"
2731 "lea 0x8(%esp),%esp\n\t"
2742 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2745 "cmpl %ebx,4(%esp)\n\t"
2747 "jne .Lge_fallthru\n\t"
2748 "cmpl %eax,(%esp)\n\t"
2749 "jnge .Lge_fallthru\n\t"
2751 "lea 0x8(%esp),%esp\n\t"
2754 /* jmp, but don't trust the assembler to choose the right jump */
2755 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2756 ".Lge_fallthru:\n\t"
2757 "lea 0x8(%esp),%esp\n\t"
2767 struct emit_ops i386_emit_ops
=
2775 i386_emit_rsh_signed
,
2776 i386_emit_rsh_unsigned
,
2784 i386_emit_less_signed
,
2785 i386_emit_less_unsigned
,
2789 i386_write_goto_address
,
2794 i386_emit_stack_flush
,
2797 i386_emit_stack_adjust
,
2798 i386_emit_int_call_1
,
2799 i386_emit_void_call_2
,
2809 static struct emit_ops
*
2813 if (is_64bit_tdesc ())
2814 return &amd64_emit_ops
;
2817 return &i386_emit_ops
;
2820 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2822 static const gdb_byte
*
2823 x86_sw_breakpoint_from_kind (int kind
, int *size
)
2825 *size
= x86_breakpoint_len
;
2826 return x86_breakpoint
;
2830 x86_supports_range_stepping (void)
2835 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2839 x86_supports_hardware_single_step (void)
2845 x86_get_ipa_tdesc_idx (void)
2847 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2848 const struct target_desc
*tdesc
= regcache
->tdesc
;
2851 if (tdesc
== tdesc_amd64_linux
|| tdesc
== tdesc_amd64_linux_no_xml
2852 || tdesc
== tdesc_x32_linux
)
2853 return X86_TDESC_SSE
;
2854 if (tdesc
== tdesc_amd64_avx_linux
|| tdesc
== tdesc_x32_avx_linux
)
2855 return X86_TDESC_AVX
;
2856 if (tdesc
== tdesc_amd64_mpx_linux
)
2857 return X86_TDESC_MPX
;
2858 if (tdesc
== tdesc_amd64_avx512_linux
|| tdesc
== tdesc_x32_avx512_linux
)
2859 return X86_TDESC_AVX512
;
2862 if (tdesc
== tdesc_i386_mmx_linux
)
2863 return X86_TDESC_MMX
;
2864 if (tdesc
== tdesc_i386_linux
|| tdesc
== tdesc_i386_linux_no_xml
)
2865 return X86_TDESC_SSE
;
2866 if (tdesc
== tdesc_i386_avx_linux
)
2867 return X86_TDESC_AVX
;
2868 if (tdesc
== tdesc_i386_mpx_linux
)
2869 return X86_TDESC_MPX
;
2870 if (tdesc
== tdesc_i386_avx512_linux
)
2871 return X86_TDESC_AVX512
;
2876 /* This is initialized assuming an amd64 target.
2877 x86_arch_setup will correct it for i386 or amd64 targets. */
2879 struct linux_target_ops the_low_target
=
2882 x86_linux_regs_info
,
2883 x86_cannot_fetch_register
,
2884 x86_cannot_store_register
,
2885 NULL
, /* fetch_register */
2888 NULL
, /* breakpoint_kind_from_pc */
2889 x86_sw_breakpoint_from_kind
,
2893 x86_supports_z_point_type
,
2896 x86_stopped_by_watchpoint
,
2897 x86_stopped_data_address
,
2898 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2899 native i386 case (no registers smaller than an xfer unit), and are not
2900 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2903 /* need to fix up i386 siginfo if host is amd64 */
2905 x86_linux_new_process
,
2906 x86_linux_new_thread
,
2908 x86_linux_prepare_to_resume
,
2909 x86_linux_process_qsupported
,
2910 x86_supports_tracepoints
,
2911 x86_get_thread_area
,
2912 x86_install_fast_tracepoint_jump_pad
,
2914 x86_get_min_fast_tracepoint_insn_len
,
2915 x86_supports_range_stepping
,
2916 NULL
, /* breakpoint_kind_from_current_state */
2917 x86_supports_hardware_single_step
,
2918 x86_get_syscall_trapinfo
,
2919 x86_get_ipa_tdesc_idx
,
2923 initialize_low_arch (void)
2925 /* Initialize the Linux target descriptions. */
2927 init_registers_amd64_linux ();
2928 init_registers_amd64_avx_linux ();
2929 init_registers_amd64_avx512_linux ();
2930 init_registers_amd64_mpx_linux ();
2932 init_registers_x32_linux ();
2933 init_registers_x32_avx_linux ();
2934 init_registers_x32_avx512_linux ();
2936 tdesc_amd64_linux_no_xml
= XNEW (struct target_desc
);
2937 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
2938 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
2940 init_registers_i386_linux ();
2941 init_registers_i386_mmx_linux ();
2942 init_registers_i386_avx_linux ();
2943 init_registers_i386_avx512_linux ();
2944 init_registers_i386_mpx_linux ();
2946 tdesc_i386_linux_no_xml
= XNEW (struct target_desc
);
2947 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
2948 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
2950 initialize_regsets_info (&x86_regsets_info
);