1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2016 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;
#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
/* Target description handed to a gdb without XML support, 64-bit case.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
/* Target description handed to a gdb without XML support, 32-bit case.  */
static struct target_desc *tdesc_i386_linux_no_xml;

/* 5-byte near jump (rel32) template used when building jump pads.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* 4-byte near jump (rel16, operand-size prefixed) template.  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
124 #include <sys/procfs.h>
125 #include "nat/gdb_ptrace.h"
128 #ifndef PTRACE_GET_THREAD_AREA
129 #define PTRACE_GET_THREAD_AREA 25
132 /* This definition comes from prctl.h, but some kernels may not have it. */
133 #ifndef PTRACE_ARCH_PRCTL
134 #define PTRACE_ARCH_PRCTL 30
137 /* The following definitions come from prctl.h, but may be absent
138 for certain configurations. */
140 #define ARCH_SET_GS 0x1001
141 #define ARCH_SET_FS 0x1002
142 #define ARCH_GET_FS 0x1003
143 #define ARCH_GET_GS 0x1004
146 /* Per-process arch-specific data we want to keep. */
148 struct arch_process_info
150 struct x86_debug_reg_state debug_reg_state
;
155 /* Mapping between the general-purpose registers in `struct user'
156 format and GDB's register array layout.
157 Note that the transfer layout uses 64-bit regs. */
158 static /*const*/ int i386_regmap
[] =
160 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
161 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
162 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
163 DS
* 8, ES
* 8, FS
* 8, GS
* 8
166 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
168 /* So code below doesn't have to care, i386 or amd64. */
169 #define ORIG_EAX ORIG_RAX
172 static const int x86_64_regmap
[] =
174 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
175 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
176 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
177 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
178 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
179 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
180 -1, -1, -1, -1, -1, -1, -1, -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
182 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
187 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
188 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
193 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, -1, -1, -1, -1
199 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
200 #define X86_64_USER_REGS (GS + 1)
202 #else /* ! __x86_64__ */
204 /* Mapping between the general-purpose registers in `struct user'
205 format and GDB's register array layout. */
206 static /*const*/ int i386_regmap
[] =
208 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
209 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
210 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
211 DS
* 4, ES
* 4, FS
* 4, GS
* 4
214 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
222 /* Returns true if the current inferior belongs to a x86-64 process,
226 is_64bit_tdesc (void)
228 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
230 return register_size (regcache
->tdesc
, 0) == 8;
236 /* Called by libthread_db. */
239 ps_get_thread_area (const struct ps_prochandle
*ph
,
240 lwpid_t lwpid
, int idx
, void **base
)
243 int use_64bit
= is_64bit_tdesc ();
250 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
254 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
265 unsigned int desc
[4];
267 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
268 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
271 /* Ensure we properly extend the value to 64-bits for x86_64. */
272 *base
= (void *) (uintptr_t) desc
[1];
277 /* Get the thread area address. This is used to recognize which
278 thread is which when tracing with the in-process agent library. We
279 don't read anything from the address, and treat it as opaque; it's
280 the address itself that we assume is unique per-thread. */
283 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
286 int use_64bit
= is_64bit_tdesc ();
291 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
293 *addr
= (CORE_ADDR
) (uintptr_t) base
;
302 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
303 struct thread_info
*thr
= get_lwp_thread (lwp
);
304 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
305 unsigned int desc
[4];
307 const int reg_thread_area
= 3; /* bits to scale down register value. */
310 collect_register_by_name (regcache
, "gs", &gs
);
312 idx
= gs
>> reg_thread_area
;
314 if (ptrace (PTRACE_GET_THREAD_AREA
,
316 (void *) (long) idx
, (unsigned long) &desc
) < 0)
327 x86_cannot_store_register (int regno
)
330 if (is_64bit_tdesc ())
334 return regno
>= I386_NUM_REGS
;
338 x86_cannot_fetch_register (int regno
)
341 if (is_64bit_tdesc ())
345 return regno
>= I386_NUM_REGS
;
349 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
354 if (register_size (regcache
->tdesc
, 0) == 8)
356 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
357 if (x86_64_regmap
[i
] != -1)
358 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
362 /* 32-bit inferior registers need to be zero-extended.
363 Callers would read uninitialized memory otherwise. */
364 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
367 for (i
= 0; i
< I386_NUM_REGS
; i
++)
368 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
370 collect_register_by_name (regcache
, "orig_eax",
371 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
375 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
380 if (register_size (regcache
->tdesc
, 0) == 8)
382 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
383 if (x86_64_regmap
[i
] != -1)
384 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
389 for (i
= 0; i
< I386_NUM_REGS
; i
++)
390 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
392 supply_register_by_name (regcache
, "orig_eax",
393 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Convert the regcache's FP state into the kernel FSAVE/FXSAVE
   layout in BUF.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

/* Convert a kernel FSAVE/FXSAVE buffer BUF into regcache FP state.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

/* FXSAVE regset, only needed on 32-bit hosts (on 64-bit hosts the
   plain FP regset is already FXSAVE format).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

/* XSAVE extended-state regset conversions.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
444 /* ??? The non-biarch i386 case stores all the i387 regs twice.
445 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
446 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
447 doesn't work. IWBN to avoid the duplication in the case where it
448 does work. Maybe the arch_setup routine could check whether it works
449 and update the supported regsets accordingly. */
451 static struct regset_info x86_regsets
[] =
453 #ifdef HAVE_PTRACE_GETREGS
454 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
456 x86_fill_gregset
, x86_store_gregset
},
457 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
458 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
460 # ifdef HAVE_PTRACE_GETFPXREGS
461 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
463 x86_fill_fpxregset
, x86_store_fpxregset
},
466 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
468 x86_fill_fpregset
, x86_store_fpregset
},
469 #endif /* HAVE_PTRACE_GETREGS */
474 x86_get_pc (struct regcache
*regcache
)
476 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
481 collect_register_by_name (regcache
, "rip", &pc
);
482 return (CORE_ADDR
) pc
;
487 collect_register_by_name (regcache
, "eip", &pc
);
488 return (CORE_ADDR
) pc
;
493 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
495 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
499 unsigned long newpc
= pc
;
500 supply_register_by_name (regcache
, "rip", &newpc
);
504 unsigned int newpc
= pc
;
505 supply_register_by_name (regcache
, "eip", &newpc
);
509 static const gdb_byte x86_breakpoint
[] = { 0xCC };
510 #define x86_breakpoint_len 1
513 x86_breakpoint_at (CORE_ADDR pc
)
517 (*the_target
->read_memory
) (pc
, &c
, 1);
524 /* Low-level function vector. */
525 struct x86_dr_low_type x86_dr_low
=
527 x86_linux_dr_set_control
,
528 x86_linux_dr_set_addr
,
529 x86_linux_dr_get_addr
,
530 x86_linux_dr_get_status
,
531 x86_linux_dr_get_control
,
535 /* Breakpoint/Watchpoint support. */
538 x86_supports_z_point_type (char z_type
)
544 case Z_PACKET_WRITE_WP
:
545 case Z_PACKET_ACCESS_WP
:
553 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
554 int size
, struct raw_breakpoint
*bp
)
556 struct process_info
*proc
= current_process ();
560 case raw_bkpt_type_hw
:
561 case raw_bkpt_type_write_wp
:
562 case raw_bkpt_type_access_wp
:
564 enum target_hw_bp_type hw_type
565 = raw_bkpt_type_to_target_hw_bp_type (type
);
566 struct x86_debug_reg_state
*state
567 = &proc
->priv
->arch_private
->debug_reg_state
;
569 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
579 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
580 int size
, struct raw_breakpoint
*bp
)
582 struct process_info
*proc
= current_process ();
586 case raw_bkpt_type_hw
:
587 case raw_bkpt_type_write_wp
:
588 case raw_bkpt_type_access_wp
:
590 enum target_hw_bp_type hw_type
591 = raw_bkpt_type_to_target_hw_bp_type (type
);
592 struct x86_debug_reg_state
*state
593 = &proc
->priv
->arch_private
->debug_reg_state
;
595 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
604 x86_stopped_by_watchpoint (void)
606 struct process_info
*proc
= current_process ();
607 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
611 x86_stopped_data_address (void)
613 struct process_info
*proc
= current_process ();
615 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
621 /* Called when a new process is created. */
623 static struct arch_process_info
*
624 x86_linux_new_process (void)
626 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
628 x86_low_init_dregs (&info
->debug_reg_state
);
633 /* Target routine for linux_new_fork. */
636 x86_linux_new_fork (struct process_info
*parent
, struct process_info
*child
)
638 /* These are allocated by linux_add_process. */
639 gdb_assert (parent
->priv
!= NULL
640 && parent
->priv
->arch_private
!= NULL
);
641 gdb_assert (child
->priv
!= NULL
642 && child
->priv
->arch_private
!= NULL
);
644 /* Linux kernel before 2.6.33 commit
645 72f674d203cd230426437cdcf7dd6f681dad8b0d
646 will inherit hardware debug registers from parent
647 on fork/vfork/clone. Newer Linux kernels create such tasks with
648 zeroed debug registers.
650 GDB core assumes the child inherits the watchpoints/hw
651 breakpoints of the parent, and will remove them all from the
652 forked off process. Copy the debug registers mirrors into the
653 new process so that all breakpoints and watchpoints can be
654 removed together. The debug registers mirror will become zeroed
655 in the end before detaching the forked off process, thus making
656 this compatible with older Linux kernels too. */
658 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
661 /* See nat/x86-dregs.h. */
663 struct x86_debug_reg_state
*
664 x86_debug_reg_state (pid_t pid
)
666 struct process_info
*proc
= find_process_pid (pid
);
668 return &proc
->priv
->arch_private
->debug_reg_state
;
671 /* When GDBSERVER is built as a 64-bit application on linux, the
672 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
673 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
674 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
675 conversion in-place ourselves. */
677 /* Convert a native/host siginfo object, into/from the siginfo in the
678 layout of the inferiors' architecture. Returns true if any
679 conversion was done; false otherwise. If DIRECTION is 1, then copy
680 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
684 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
687 unsigned int machine
;
688 int tid
= lwpid_of (current_thread
);
689 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
691 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
692 if (!is_64bit_tdesc ())
693 return amd64_linux_siginfo_fixup_common (native
, inf
, direction
,
695 /* No fixup for native x32 GDB. */
696 else if (!is_elf64
&& sizeof (void *) == 8)
697 return amd64_linux_siginfo_fixup_common (native
, inf
, direction
,
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc.
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1	/* Unknown until first probed with ptrace.  */
#else
  0
#endif
;
738 /* Get Linux/x86 target description from running target. */
740 static const struct target_desc
*
741 x86_linux_read_description (void)
743 unsigned int machine
;
747 static uint64_t xcr0
;
748 struct regset_info
*regset
;
750 tid
= lwpid_of (current_thread
);
752 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
754 if (sizeof (void *) == 4)
757 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
759 else if (machine
== EM_X86_64
)
760 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
764 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
765 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
767 elf_fpxregset_t fpxregs
;
769 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
771 have_ptrace_getfpxregs
= 0;
772 have_ptrace_getregset
= 0;
773 return tdesc_i386_mmx_linux
;
776 have_ptrace_getfpxregs
= 1;
782 x86_xcr0
= X86_XSTATE_SSE_MASK
;
786 if (machine
== EM_X86_64
)
787 return tdesc_amd64_linux_no_xml
;
790 return tdesc_i386_linux_no_xml
;
793 if (have_ptrace_getregset
== -1)
795 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
798 iov
.iov_base
= xstateregs
;
799 iov
.iov_len
= sizeof (xstateregs
);
801 /* Check if PTRACE_GETREGSET works. */
802 if (ptrace (PTRACE_GETREGSET
, tid
,
803 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
804 have_ptrace_getregset
= 0;
807 have_ptrace_getregset
= 1;
809 /* Get XCR0 from XSAVE extended state. */
810 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
811 / sizeof (uint64_t))];
813 /* Use PTRACE_GETREGSET if it is available. */
814 for (regset
= x86_regsets
;
815 regset
->fill_function
!= NULL
; regset
++)
816 if (regset
->get_request
== PTRACE_GETREGSET
)
817 regset
->size
= X86_XSTATE_SIZE (xcr0
);
818 else if (regset
->type
!= GENERAL_REGS
)
823 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
824 xcr0_features
= (have_ptrace_getregset
825 && (xcr0
& X86_XSTATE_ALL_MASK
));
830 if (machine
== EM_X86_64
)
837 switch (xcr0
& X86_XSTATE_ALL_MASK
)
839 case X86_XSTATE_AVX512_MASK
:
840 return tdesc_amd64_avx512_linux
;
842 case X86_XSTATE_MPX_MASK
:
843 return tdesc_amd64_mpx_linux
;
845 case X86_XSTATE_AVX_MASK
:
846 return tdesc_amd64_avx_linux
;
849 return tdesc_amd64_linux
;
853 return tdesc_amd64_linux
;
859 switch (xcr0
& X86_XSTATE_ALL_MASK
)
861 case X86_XSTATE_AVX512_MASK
:
862 return tdesc_x32_avx512_linux
;
864 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
865 case X86_XSTATE_AVX_MASK
:
866 return tdesc_x32_avx_linux
;
869 return tdesc_x32_linux
;
873 return tdesc_x32_linux
;
881 switch (xcr0
& X86_XSTATE_ALL_MASK
)
883 case (X86_XSTATE_AVX512_MASK
):
884 return tdesc_i386_avx512_linux
;
886 case (X86_XSTATE_MPX_MASK
):
887 return tdesc_i386_mpx_linux
;
889 case (X86_XSTATE_AVX_MASK
):
890 return tdesc_i386_avx_linux
;
893 return tdesc_i386_linux
;
897 return tdesc_i386_linux
;
900 gdb_assert_not_reached ("failed to return tdesc");
903 /* Callback for find_inferior. Stops iteration when a thread with a
904 given PID is found. */
907 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
909 int pid
= *(int *) data
;
911 return (ptid_get_pid (entry
->id
) == pid
);
914 /* Callback for for_each_inferior. Calls the arch_setup routine for
918 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
920 int pid
= ptid_get_pid (entry
->id
);
922 /* Look up any thread of this processes. */
924 = (struct thread_info
*) find_inferior (&all_threads
,
925 same_process_callback
, &pid
);
927 the_low_target
.arch_setup ();
930 /* Update all the target description of all processes; a new GDB
931 connected, and it may or not support xml target descriptions. */
934 x86_linux_update_xmltarget (void)
936 struct thread_info
*saved_thread
= current_thread
;
938 /* Before changing the register cache's internal layout, flush the
939 contents of the current valid caches back to the threads, and
940 release the current regcache objects. */
943 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
945 current_thread
= saved_thread
;
948 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
952 x86_linux_process_qsupported (char **features
, int count
)
956 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
957 with "i386" in qSupported query, it supports x86 XML target
960 for (i
= 0; i
< count
; i
++)
962 const char *feature
= features
[i
];
964 if (startswith (feature
, "xmlRegisters="))
966 char *copy
= xstrdup (feature
+ 13);
969 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
971 if (strcmp (p
, "i386") == 0)
981 x86_linux_update_xmltarget ();
984 /* Common for x86/x86-64. */
986 static struct regsets_info x86_regsets_info
=
988 x86_regsets
, /* regsets */
990 NULL
, /* disabled_regsets */
994 static struct regs_info amd64_linux_regs_info
=
996 NULL
, /* regset_bitmap */
997 NULL
, /* usrregs_info */
1001 static struct usrregs_info i386_linux_usrregs_info
=
1007 static struct regs_info i386_linux_regs_info
=
1009 NULL
, /* regset_bitmap */
1010 &i386_linux_usrregs_info
,
1014 const struct regs_info
*
1015 x86_linux_regs_info (void)
1018 if (is_64bit_tdesc ())
1019 return &amd64_linux_regs_info
;
1022 return &i386_linux_regs_info
;
1025 /* Initialize the target description for the architecture of the
1029 x86_arch_setup (void)
1031 current_process ()->tdesc
= x86_linux_read_description ();
1034 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1035 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1038 x86_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
, int *sysret
)
1040 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
1047 collect_register_by_name (regcache
, "orig_rax", &l_sysno
);
1048 collect_register_by_name (regcache
, "rax", &l_sysret
);
1049 *sysno
= (int) l_sysno
;
1050 *sysret
= (int) l_sysret
;
1054 collect_register_by_name (regcache
, "orig_eax", sysno
);
1055 collect_register_by_name (regcache
, "eax", sysret
);
1060 x86_supports_tracepoints (void)
1066 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1068 write_inferior_memory (*to
, buf
, len
);
/* Convert an ASCII string of space-separated hex byte values OP
   (e.g. "48 83 ec 18") into binary in BUF.  Returns the number of
   bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* Stop when no more hex digits could be consumed.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1094 /* Build a jump pad that saves registers and calls a collection
1095 function. Writes a jump instruction to the jump pad to
1096 JJUMPAD_INSN. The caller is responsible to write it in at the
1097 tracepoint address. */
1100 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1101 CORE_ADDR collector
,
1104 CORE_ADDR
*jump_entry
,
1105 CORE_ADDR
*trampoline
,
1106 ULONGEST
*trampoline_size
,
1107 unsigned char *jjump_pad_insn
,
1108 ULONGEST
*jjump_pad_insn_size
,
1109 CORE_ADDR
*adjusted_insn_addr
,
1110 CORE_ADDR
*adjusted_insn_addr_end
,
1113 unsigned char buf
[40];
1117 CORE_ADDR buildaddr
= *jump_entry
;
1119 /* Build the jump pad. */
1121 /* First, do tracepoint data collection. Save registers. */
1123 /* Need to ensure stack pointer saved first. */
1124 buf
[i
++] = 0x54; /* push %rsp */
1125 buf
[i
++] = 0x55; /* push %rbp */
1126 buf
[i
++] = 0x57; /* push %rdi */
1127 buf
[i
++] = 0x56; /* push %rsi */
1128 buf
[i
++] = 0x52; /* push %rdx */
1129 buf
[i
++] = 0x51; /* push %rcx */
1130 buf
[i
++] = 0x53; /* push %rbx */
1131 buf
[i
++] = 0x50; /* push %rax */
1132 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1133 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1134 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1135 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1136 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1137 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1138 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1139 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1140 buf
[i
++] = 0x9c; /* pushfq */
1141 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1143 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1144 i
+= sizeof (unsigned long);
1145 buf
[i
++] = 0x57; /* push %rdi */
1146 append_insns (&buildaddr
, i
, buf
);
1148 /* Stack space for the collecting_t object. */
1150 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1151 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1152 memcpy (buf
+ i
, &tpoint
, 8);
1154 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1155 i
+= push_opcode (&buf
[i
],
1156 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1157 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1158 append_insns (&buildaddr
, i
, buf
);
1162 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1163 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1165 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1166 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1167 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1168 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1169 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1170 append_insns (&buildaddr
, i
, buf
);
1172 /* Set up the gdb_collect call. */
1173 /* At this point, (stack pointer + 0x18) is the base of our saved
1177 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1178 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1180 /* tpoint address may be 64-bit wide. */
1181 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1182 memcpy (buf
+ i
, &tpoint
, 8);
1184 append_insns (&buildaddr
, i
, buf
);
1186 /* The collector function being in the shared library, may be
1187 >31-bits away off the jump pad. */
1189 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1190 memcpy (buf
+ i
, &collector
, 8);
1192 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1193 append_insns (&buildaddr
, i
, buf
);
1195 /* Clear the spin-lock. */
1197 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1198 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1199 memcpy (buf
+ i
, &lockaddr
, 8);
1201 append_insns (&buildaddr
, i
, buf
);
1203 /* Remove stack that had been used for the collect_t object. */
1205 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1206 append_insns (&buildaddr
, i
, buf
);
1208 /* Restore register state. */
1210 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1214 buf
[i
++] = 0x9d; /* popfq */
1215 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1216 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1217 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1218 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1219 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1220 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1221 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1222 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1223 buf
[i
++] = 0x58; /* pop %rax */
1224 buf
[i
++] = 0x5b; /* pop %rbx */
1225 buf
[i
++] = 0x59; /* pop %rcx */
1226 buf
[i
++] = 0x5a; /* pop %rdx */
1227 buf
[i
++] = 0x5e; /* pop %rsi */
1228 buf
[i
++] = 0x5f; /* pop %rdi */
1229 buf
[i
++] = 0x5d; /* pop %rbp */
1230 buf
[i
++] = 0x5c; /* pop %rsp */
1231 append_insns (&buildaddr
, i
, buf
);
1233 /* Now, adjust the original instruction to execute in the jump
1235 *adjusted_insn_addr
= buildaddr
;
1236 relocate_instruction (&buildaddr
, tpaddr
);
1237 *adjusted_insn_addr_end
= buildaddr
;
1239 /* Finally, write a jump back to the program. */
1241 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1242 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1245 "E.Jump back from jump pad too far from tracepoint "
1246 "(offset 0x%" PRIx64
" > int32).", loffset
);
1250 offset
= (int) loffset
;
1251 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1252 memcpy (buf
+ 1, &offset
, 4);
1253 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1255 /* The jump pad is now built. Wire in a jump to our jump pad. This
1256 is always done last (by our caller actually), so that we can
1257 install fast tracepoints with threads running. This relies on
1258 the agent's atomic write support. */
1259 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1260 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1263 "E.Jump pad too far from tracepoint "
1264 "(offset 0x%" PRIx64
" > int32).", loffset
);
1268 offset
= (int) loffset
;
1270 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1271 memcpy (buf
+ 1, &offset
, 4);
1272 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1273 *jjump_pad_insn_size
= sizeof (jump_insn
);
1275 /* Return the end address of our pad. */
1276 *jump_entry
= buildaddr
;
1281 #endif /* __x86_64__ */
1283 /* Build a jump pad that saves registers and calls a collection
1284 function. Writes a jump instruction to the jump pad to
1285 JJUMPAD_INSN. The caller is responsible to write it in at the
1286 tracepoint address. */
1289 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1290 CORE_ADDR collector
,
1293 CORE_ADDR
*jump_entry
,
1294 CORE_ADDR
*trampoline
,
1295 ULONGEST
*trampoline_size
,
1296 unsigned char *jjump_pad_insn
,
1297 ULONGEST
*jjump_pad_insn_size
,
1298 CORE_ADDR
*adjusted_insn_addr
,
1299 CORE_ADDR
*adjusted_insn_addr_end
,
1302 unsigned char buf
[0x100];
1304 CORE_ADDR buildaddr
= *jump_entry
;
1306 /* Build the jump pad. */
1308 /* First, do tracepoint data collection. Save registers. */
1310 buf
[i
++] = 0x60; /* pushad */
1311 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1312 *((int *)(buf
+ i
)) = (int) tpaddr
;
1314 buf
[i
++] = 0x9c; /* pushf */
1315 buf
[i
++] = 0x1e; /* push %ds */
1316 buf
[i
++] = 0x06; /* push %es */
1317 buf
[i
++] = 0x0f; /* push %fs */
1319 buf
[i
++] = 0x0f; /* push %gs */
1321 buf
[i
++] = 0x16; /* push %ss */
1322 buf
[i
++] = 0x0e; /* push %cs */
1323 append_insns (&buildaddr
, i
, buf
);
1325 /* Stack space for the collecting_t object. */
1327 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1329 /* Build the object. */
1330 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1331 memcpy (buf
+ i
, &tpoint
, 4);
1333 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1335 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1336 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1337 append_insns (&buildaddr
, i
, buf
);
1339 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1340 If we cared for it, this could be using xchg alternatively. */
1343 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1344 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1346 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1348 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1349 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1350 append_insns (&buildaddr
, i
, buf
);
1353 /* Set up arguments to the gdb_collect call. */
1355 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1356 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1357 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1358 append_insns (&buildaddr
, i
, buf
);
1361 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1362 append_insns (&buildaddr
, i
, buf
);
1365 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1366 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1368 append_insns (&buildaddr
, i
, buf
);
1370 buf
[0] = 0xe8; /* call <reladdr> */
1371 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1372 memcpy (buf
+ 1, &offset
, 4);
1373 append_insns (&buildaddr
, 5, buf
);
1374 /* Clean up after the call. */
1375 buf
[0] = 0x83; /* add $0x8,%esp */
1378 append_insns (&buildaddr
, 3, buf
);
1381 /* Clear the spin-lock. This would need the LOCK prefix on older
1384 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1385 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1386 memcpy (buf
+ i
, &lockaddr
, 4);
1388 append_insns (&buildaddr
, i
, buf
);
1391 /* Remove stack that had been used for the collect_t object. */
1393 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1394 append_insns (&buildaddr
, i
, buf
);
1397 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1400 buf
[i
++] = 0x17; /* pop %ss */
1401 buf
[i
++] = 0x0f; /* pop %gs */
1403 buf
[i
++] = 0x0f; /* pop %fs */
1405 buf
[i
++] = 0x07; /* pop %es */
1406 buf
[i
++] = 0x1f; /* pop %ds */
1407 buf
[i
++] = 0x9d; /* popf */
1408 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1411 buf
[i
++] = 0x61; /* popad */
1412 append_insns (&buildaddr
, i
, buf
);
1414 /* Now, adjust the original instruction to execute in the jump
1416 *adjusted_insn_addr
= buildaddr
;
1417 relocate_instruction (&buildaddr
, tpaddr
);
1418 *adjusted_insn_addr_end
= buildaddr
;
1420 /* Write the jump back to the program. */
1421 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1422 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1423 memcpy (buf
+ 1, &offset
, 4);
1424 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1426 /* The jump pad is now built. Wire in a jump to our jump pad. This
1427 is always done last (by our caller actually), so that we can
1428 install fast tracepoints with threads running. This relies on
1429 the agent's atomic write support. */
1432 /* Create a trampoline. */
1433 *trampoline_size
= sizeof (jump_insn
);
1434 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1436 /* No trampoline space available. */
1438 "E.Cannot allocate trampoline space needed for fast "
1439 "tracepoints on 4-byte instructions.");
1443 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1444 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1445 memcpy (buf
+ 1, &offset
, 4);
1446 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1448 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1449 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1450 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1451 memcpy (buf
+ 2, &offset
, 2);
1452 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1453 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1457 /* Else use a 32-bit relative jump instruction. */
1458 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1459 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1460 memcpy (buf
+ 1, &offset
, 4);
1461 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1462 *jjump_pad_insn_size
= sizeof (jump_insn
);
1465 /* Return the end address of our pad. */
1466 *jump_entry
= buildaddr
;
1472 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1473 CORE_ADDR collector
,
1476 CORE_ADDR
*jump_entry
,
1477 CORE_ADDR
*trampoline
,
1478 ULONGEST
*trampoline_size
,
1479 unsigned char *jjump_pad_insn
,
1480 ULONGEST
*jjump_pad_insn_size
,
1481 CORE_ADDR
*adjusted_insn_addr
,
1482 CORE_ADDR
*adjusted_insn_addr_end
,
1486 if (is_64bit_tdesc ())
1487 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1488 collector
, lockaddr
,
1489 orig_size
, jump_entry
,
1490 trampoline
, trampoline_size
,
1492 jjump_pad_insn_size
,
1494 adjusted_insn_addr_end
,
1498 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1499 collector
, lockaddr
,
1500 orig_size
, jump_entry
,
1501 trampoline
, trampoline_size
,
1503 jjump_pad_insn_size
,
1505 adjusted_insn_addr_end
,
1509 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1513 x86_get_min_fast_tracepoint_insn_len (void)
1515 static int warned_about_fast_tracepoints
= 0;
1518 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1519 used for fast tracepoints. */
1520 if (is_64bit_tdesc ())
1524 if (agent_loaded_p ())
1526 char errbuf
[IPA_BUFSIZ
];
1530 /* On x86, if trampolines are available, then 4-byte jump instructions
1531 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1532 with a 4-byte offset are used instead. */
1533 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1537 /* GDB has no channel to explain to user why a shorter fast
1538 tracepoint is not possible, but at least make GDBserver
1539 mention that something has gone awry. */
1540 if (!warned_about_fast_tracepoints
)
1542 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1543 warned_about_fast_tracepoints
= 1;
1550 /* Indicate that the minimum length is currently unknown since the IPA
1551 has not loaded yet. */
1557 add_insns (unsigned char *start
, int len
)
1559 CORE_ADDR buildaddr
= current_insn_ptr
;
1562 debug_printf ("Adding %d bytes of insn at %s\n",
1563 len
, paddress (buildaddr
));
1565 append_insns (&buildaddr
, len
, start
);
1566 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1610 amd64_emit_prologue (void)
1612 EMIT_ASM (amd64_prologue
,
1614 "movq %rsp,%rbp\n\t"
1615 "sub $0x20,%rsp\n\t"
1616 "movq %rdi,-8(%rbp)\n\t"
1617 "movq %rsi,-16(%rbp)");
1622 amd64_emit_epilogue (void)
1624 EMIT_ASM (amd64_epilogue
,
1625 "movq -16(%rbp),%rdi\n\t"
1626 "movq %rax,(%rdi)\n\t"
1633 amd64_emit_add (void)
1635 EMIT_ASM (amd64_add
,
1636 "add (%rsp),%rax\n\t"
1637 "lea 0x8(%rsp),%rsp");
1641 amd64_emit_sub (void)
1643 EMIT_ASM (amd64_sub
,
1644 "sub %rax,(%rsp)\n\t"
1649 amd64_emit_mul (void)
1655 amd64_emit_lsh (void)
1661 amd64_emit_rsh_signed (void)
1667 amd64_emit_rsh_unsigned (void)
1673 amd64_emit_ext (int arg
)
1678 EMIT_ASM (amd64_ext_8
,
1684 EMIT_ASM (amd64_ext_16
,
1689 EMIT_ASM (amd64_ext_32
,
1698 amd64_emit_log_not (void)
1700 EMIT_ASM (amd64_log_not
,
1701 "test %rax,%rax\n\t"
1707 amd64_emit_bit_and (void)
1709 EMIT_ASM (amd64_and
,
1710 "and (%rsp),%rax\n\t"
1711 "lea 0x8(%rsp),%rsp");
1715 amd64_emit_bit_or (void)
1718 "or (%rsp),%rax\n\t"
1719 "lea 0x8(%rsp),%rsp");
1723 amd64_emit_bit_xor (void)
1725 EMIT_ASM (amd64_xor
,
1726 "xor (%rsp),%rax\n\t"
1727 "lea 0x8(%rsp),%rsp");
1731 amd64_emit_bit_not (void)
1733 EMIT_ASM (amd64_bit_not
,
1734 "xorq $0xffffffffffffffff,%rax");
1738 amd64_emit_equal (void)
1740 EMIT_ASM (amd64_equal
,
1741 "cmp %rax,(%rsp)\n\t"
1742 "je .Lamd64_equal_true\n\t"
1744 "jmp .Lamd64_equal_end\n\t"
1745 ".Lamd64_equal_true:\n\t"
1747 ".Lamd64_equal_end:\n\t"
1748 "lea 0x8(%rsp),%rsp");
1752 amd64_emit_less_signed (void)
1754 EMIT_ASM (amd64_less_signed
,
1755 "cmp %rax,(%rsp)\n\t"
1756 "jl .Lamd64_less_signed_true\n\t"
1758 "jmp .Lamd64_less_signed_end\n\t"
1759 ".Lamd64_less_signed_true:\n\t"
1761 ".Lamd64_less_signed_end:\n\t"
1762 "lea 0x8(%rsp),%rsp");
1766 amd64_emit_less_unsigned (void)
1768 EMIT_ASM (amd64_less_unsigned
,
1769 "cmp %rax,(%rsp)\n\t"
1770 "jb .Lamd64_less_unsigned_true\n\t"
1772 "jmp .Lamd64_less_unsigned_end\n\t"
1773 ".Lamd64_less_unsigned_true:\n\t"
1775 ".Lamd64_less_unsigned_end:\n\t"
1776 "lea 0x8(%rsp),%rsp");
1780 amd64_emit_ref (int size
)
1785 EMIT_ASM (amd64_ref1
,
1789 EMIT_ASM (amd64_ref2
,
1793 EMIT_ASM (amd64_ref4
,
1794 "movl (%rax),%eax");
1797 EMIT_ASM (amd64_ref8
,
1798 "movq (%rax),%rax");
1804 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1806 EMIT_ASM (amd64_if_goto
,
1810 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1818 amd64_emit_goto (int *offset_p
, int *size_p
)
1820 EMIT_ASM (amd64_goto
,
1821 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1829 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1831 int diff
= (to
- (from
+ size
));
1832 unsigned char buf
[sizeof (int)];
1840 memcpy (buf
, &diff
, sizeof (int));
1841 write_inferior_memory (from
, buf
, sizeof (int));
1845 amd64_emit_const (LONGEST num
)
1847 unsigned char buf
[16];
1849 CORE_ADDR buildaddr
= current_insn_ptr
;
1852 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1853 memcpy (&buf
[i
], &num
, sizeof (num
));
1855 append_insns (&buildaddr
, i
, buf
);
1856 current_insn_ptr
= buildaddr
;
1860 amd64_emit_call (CORE_ADDR fn
)
1862 unsigned char buf
[16];
1864 CORE_ADDR buildaddr
;
1867 /* The destination function being in the shared library, may be
1868 >31-bits away off the compiled code pad. */
1870 buildaddr
= current_insn_ptr
;
1872 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1876 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1878 /* Offset is too large for a call. Use callq, but that requires
1879 a register, so avoid it if possible. Use r10, since it is
1880 call-clobbered, we don't have to push/pop it. */
1881 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1883 memcpy (buf
+ i
, &fn
, 8);
1885 buf
[i
++] = 0xff; /* callq *%r10 */
1890 int offset32
= offset64
; /* we know we can't overflow here. */
1891 memcpy (buf
+ i
, &offset32
, 4);
1895 append_insns (&buildaddr
, i
, buf
);
1896 current_insn_ptr
= buildaddr
;
1900 amd64_emit_reg (int reg
)
1902 unsigned char buf
[16];
1904 CORE_ADDR buildaddr
;
1906 /* Assume raw_regs is still in %rdi. */
1907 buildaddr
= current_insn_ptr
;
1909 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1910 memcpy (&buf
[i
], ®
, sizeof (reg
));
1912 append_insns (&buildaddr
, i
, buf
);
1913 current_insn_ptr
= buildaddr
;
1914 amd64_emit_call (get_raw_reg_func_addr ());
1918 amd64_emit_pop (void)
1920 EMIT_ASM (amd64_pop
,
1925 amd64_emit_stack_flush (void)
1927 EMIT_ASM (amd64_stack_flush
,
1932 amd64_emit_zero_ext (int arg
)
1937 EMIT_ASM (amd64_zero_ext_8
,
1941 EMIT_ASM (amd64_zero_ext_16
,
1942 "and $0xffff,%rax");
1945 EMIT_ASM (amd64_zero_ext_32
,
1946 "mov $0xffffffff,%rcx\n\t"
1955 amd64_emit_swap (void)
1957 EMIT_ASM (amd64_swap
,
1964 amd64_emit_stack_adjust (int n
)
1966 unsigned char buf
[16];
1968 CORE_ADDR buildaddr
= current_insn_ptr
;
1971 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1975 /* This only handles adjustments up to 16, but we don't expect any more. */
1977 append_insns (&buildaddr
, i
, buf
);
1978 current_insn_ptr
= buildaddr
;
1981 /* FN's prototype is `LONGEST(*fn)(int)'. */
1984 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1986 unsigned char buf
[16];
1988 CORE_ADDR buildaddr
;
1990 buildaddr
= current_insn_ptr
;
1992 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1993 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1995 append_insns (&buildaddr
, i
, buf
);
1996 current_insn_ptr
= buildaddr
;
1997 amd64_emit_call (fn
);
2000 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2003 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2005 unsigned char buf
[16];
2007 CORE_ADDR buildaddr
;
2009 buildaddr
= current_insn_ptr
;
2011 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2012 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2014 append_insns (&buildaddr
, i
, buf
);
2015 current_insn_ptr
= buildaddr
;
2016 EMIT_ASM (amd64_void_call_2_a
,
2017 /* Save away a copy of the stack top. */
2019 /* Also pass top as the second argument. */
2021 amd64_emit_call (fn
);
2022 EMIT_ASM (amd64_void_call_2_b
,
2023 /* Restore the stack top, %rax may have been trashed. */
2028 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2031 "cmp %rax,(%rsp)\n\t"
2032 "jne .Lamd64_eq_fallthru\n\t"
2033 "lea 0x8(%rsp),%rsp\n\t"
2035 /* jmp, but don't trust the assembler to choose the right jump */
2036 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2037 ".Lamd64_eq_fallthru:\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2048 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2051 "cmp %rax,(%rsp)\n\t"
2052 "je .Lamd64_ne_fallthru\n\t"
2053 "lea 0x8(%rsp),%rsp\n\t"
2055 /* jmp, but don't trust the assembler to choose the right jump */
2056 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2057 ".Lamd64_ne_fallthru:\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2068 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2071 "cmp %rax,(%rsp)\n\t"
2072 "jnl .Lamd64_lt_fallthru\n\t"
2073 "lea 0x8(%rsp),%rsp\n\t"
2075 /* jmp, but don't trust the assembler to choose the right jump */
2076 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2077 ".Lamd64_lt_fallthru:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2088 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2091 "cmp %rax,(%rsp)\n\t"
2092 "jnle .Lamd64_le_fallthru\n\t"
2093 "lea 0x8(%rsp),%rsp\n\t"
2095 /* jmp, but don't trust the assembler to choose the right jump */
2096 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2097 ".Lamd64_le_fallthru:\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2108 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2111 "cmp %rax,(%rsp)\n\t"
2112 "jng .Lamd64_gt_fallthru\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2115 /* jmp, but don't trust the assembler to choose the right jump */
2116 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2117 ".Lamd64_gt_fallthru:\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2128 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2131 "cmp %rax,(%rsp)\n\t"
2132 "jnge .Lamd64_ge_fallthru\n\t"
2133 ".Lamd64_ge_jump:\n\t"
2134 "lea 0x8(%rsp),%rsp\n\t"
2136 /* jmp, but don't trust the assembler to choose the right jump */
2137 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2138 ".Lamd64_ge_fallthru:\n\t"
2139 "lea 0x8(%rsp),%rsp\n\t"
2148 struct emit_ops amd64_emit_ops
=
2150 amd64_emit_prologue
,
2151 amd64_emit_epilogue
,
2156 amd64_emit_rsh_signed
,
2157 amd64_emit_rsh_unsigned
,
2165 amd64_emit_less_signed
,
2166 amd64_emit_less_unsigned
,
2170 amd64_write_goto_address
,
2175 amd64_emit_stack_flush
,
2176 amd64_emit_zero_ext
,
2178 amd64_emit_stack_adjust
,
2179 amd64_emit_int_call_1
,
2180 amd64_emit_void_call_2
,
2189 #endif /* __x86_64__ */
2192 i386_emit_prologue (void)
2194 EMIT_ASM32 (i386_prologue
,
2198 /* At this point, the raw regs base address is at 8(%ebp), and the
2199 value pointer is at 12(%ebp). */
2203 i386_emit_epilogue (void)
2205 EMIT_ASM32 (i386_epilogue
,
2206 "mov 12(%ebp),%ecx\n\t"
2207 "mov %eax,(%ecx)\n\t"
2208 "mov %ebx,0x4(%ecx)\n\t"
2216 i386_emit_add (void)
2218 EMIT_ASM32 (i386_add
,
2219 "add (%esp),%eax\n\t"
2220 "adc 0x4(%esp),%ebx\n\t"
2221 "lea 0x8(%esp),%esp");
2225 i386_emit_sub (void)
2227 EMIT_ASM32 (i386_sub
,
2228 "subl %eax,(%esp)\n\t"
2229 "sbbl %ebx,4(%esp)\n\t"
2235 i386_emit_mul (void)
2241 i386_emit_lsh (void)
2247 i386_emit_rsh_signed (void)
2253 i386_emit_rsh_unsigned (void)
2259 i386_emit_ext (int arg
)
2264 EMIT_ASM32 (i386_ext_8
,
2267 "movl %eax,%ebx\n\t"
2271 EMIT_ASM32 (i386_ext_16
,
2273 "movl %eax,%ebx\n\t"
2277 EMIT_ASM32 (i386_ext_32
,
2278 "movl %eax,%ebx\n\t"
2287 i386_emit_log_not (void)
2289 EMIT_ASM32 (i386_log_not
,
2291 "test %eax,%eax\n\t"
2298 i386_emit_bit_and (void)
2300 EMIT_ASM32 (i386_and
,
2301 "and (%esp),%eax\n\t"
2302 "and 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2307 i386_emit_bit_or (void)
2309 EMIT_ASM32 (i386_or
,
2310 "or (%esp),%eax\n\t"
2311 "or 0x4(%esp),%ebx\n\t"
2312 "lea 0x8(%esp),%esp");
2316 i386_emit_bit_xor (void)
2318 EMIT_ASM32 (i386_xor
,
2319 "xor (%esp),%eax\n\t"
2320 "xor 0x4(%esp),%ebx\n\t"
2321 "lea 0x8(%esp),%esp");
2325 i386_emit_bit_not (void)
2327 EMIT_ASM32 (i386_bit_not
,
2328 "xor $0xffffffff,%eax\n\t"
2329 "xor $0xffffffff,%ebx\n\t");
2333 i386_emit_equal (void)
2335 EMIT_ASM32 (i386_equal
,
2336 "cmpl %ebx,4(%esp)\n\t"
2337 "jne .Li386_equal_false\n\t"
2338 "cmpl %eax,(%esp)\n\t"
2339 "je .Li386_equal_true\n\t"
2340 ".Li386_equal_false:\n\t"
2342 "jmp .Li386_equal_end\n\t"
2343 ".Li386_equal_true:\n\t"
2345 ".Li386_equal_end:\n\t"
2347 "lea 0x8(%esp),%esp");
2351 i386_emit_less_signed (void)
2353 EMIT_ASM32 (i386_less_signed
,
2354 "cmpl %ebx,4(%esp)\n\t"
2355 "jl .Li386_less_signed_true\n\t"
2356 "jne .Li386_less_signed_false\n\t"
2357 "cmpl %eax,(%esp)\n\t"
2358 "jl .Li386_less_signed_true\n\t"
2359 ".Li386_less_signed_false:\n\t"
2361 "jmp .Li386_less_signed_end\n\t"
2362 ".Li386_less_signed_true:\n\t"
2364 ".Li386_less_signed_end:\n\t"
2366 "lea 0x8(%esp),%esp");
2370 i386_emit_less_unsigned (void)
2372 EMIT_ASM32 (i386_less_unsigned
,
2373 "cmpl %ebx,4(%esp)\n\t"
2374 "jb .Li386_less_unsigned_true\n\t"
2375 "jne .Li386_less_unsigned_false\n\t"
2376 "cmpl %eax,(%esp)\n\t"
2377 "jb .Li386_less_unsigned_true\n\t"
2378 ".Li386_less_unsigned_false:\n\t"
2380 "jmp .Li386_less_unsigned_end\n\t"
2381 ".Li386_less_unsigned_true:\n\t"
2383 ".Li386_less_unsigned_end:\n\t"
2385 "lea 0x8(%esp),%esp");
2389 i386_emit_ref (int size
)
2394 EMIT_ASM32 (i386_ref1
,
2398 EMIT_ASM32 (i386_ref2
,
2402 EMIT_ASM32 (i386_ref4
,
2403 "movl (%eax),%eax");
2406 EMIT_ASM32 (i386_ref8
,
2407 "movl 4(%eax),%ebx\n\t"
2408 "movl (%eax),%eax");
2414 i386_emit_if_goto (int *offset_p
, int *size_p
)
2416 EMIT_ASM32 (i386_if_goto
,
2422 /* Don't trust the assembler to choose the right jump */
2423 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2426 *offset_p
= 11; /* be sure that this matches the sequence above */
2432 i386_emit_goto (int *offset_p
, int *size_p
)
2434 EMIT_ASM32 (i386_goto
,
2435 /* Don't trust the assembler to choose the right jump */
2436 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2444 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2446 int diff
= (to
- (from
+ size
));
2447 unsigned char buf
[sizeof (int)];
2449 /* We're only doing 4-byte sizes at the moment. */
2456 memcpy (buf
, &diff
, sizeof (int));
2457 write_inferior_memory (from
, buf
, sizeof (int));
2461 i386_emit_const (LONGEST num
)
2463 unsigned char buf
[16];
2465 CORE_ADDR buildaddr
= current_insn_ptr
;
2468 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2469 lo
= num
& 0xffffffff;
2470 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2472 hi
= ((num
>> 32) & 0xffffffff);
2475 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2476 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2481 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2483 append_insns (&buildaddr
, i
, buf
);
2484 current_insn_ptr
= buildaddr
;
2488 i386_emit_call (CORE_ADDR fn
)
2490 unsigned char buf
[16];
2492 CORE_ADDR buildaddr
;
2494 buildaddr
= current_insn_ptr
;
2496 buf
[i
++] = 0xe8; /* call <reladdr> */
2497 offset
= ((int) fn
) - (buildaddr
+ 5);
2498 memcpy (buf
+ 1, &offset
, 4);
2499 append_insns (&buildaddr
, 5, buf
);
2500 current_insn_ptr
= buildaddr
;
2504 i386_emit_reg (int reg
)
2506 unsigned char buf
[16];
2508 CORE_ADDR buildaddr
;
2510 EMIT_ASM32 (i386_reg_a
,
2512 buildaddr
= current_insn_ptr
;
2514 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2515 memcpy (&buf
[i
], ®
, sizeof (reg
));
2517 append_insns (&buildaddr
, i
, buf
);
2518 current_insn_ptr
= buildaddr
;
2519 EMIT_ASM32 (i386_reg_b
,
2520 "mov %eax,4(%esp)\n\t"
2521 "mov 8(%ebp),%eax\n\t"
2523 i386_emit_call (get_raw_reg_func_addr ());
2524 EMIT_ASM32 (i386_reg_c
,
2526 "lea 0x8(%esp),%esp");
2530 i386_emit_pop (void)
2532 EMIT_ASM32 (i386_pop
,
2538 i386_emit_stack_flush (void)
2540 EMIT_ASM32 (i386_stack_flush
,
2546 i386_emit_zero_ext (int arg
)
2551 EMIT_ASM32 (i386_zero_ext_8
,
2552 "and $0xff,%eax\n\t"
2556 EMIT_ASM32 (i386_zero_ext_16
,
2557 "and $0xffff,%eax\n\t"
2561 EMIT_ASM32 (i386_zero_ext_32
,
2570 i386_emit_swap (void)
2572 EMIT_ASM32 (i386_swap
,
2582 i386_emit_stack_adjust (int n
)
2584 unsigned char buf
[16];
2586 CORE_ADDR buildaddr
= current_insn_ptr
;
2589 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2593 append_insns (&buildaddr
, i
, buf
);
2594 current_insn_ptr
= buildaddr
;
2597 /* FN's prototype is `LONGEST(*fn)(int)'. */
2600 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2602 unsigned char buf
[16];
2604 CORE_ADDR buildaddr
;
2606 EMIT_ASM32 (i386_int_call_1_a
,
2607 /* Reserve a bit of stack space. */
2609 /* Put the one argument on the stack. */
2610 buildaddr
= current_insn_ptr
;
2612 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2615 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2617 append_insns (&buildaddr
, i
, buf
);
2618 current_insn_ptr
= buildaddr
;
2619 i386_emit_call (fn
);
2620 EMIT_ASM32 (i386_int_call_1_c
,
2622 "lea 0x8(%esp),%esp");
2625 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2628 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2630 unsigned char buf
[16];
2632 CORE_ADDR buildaddr
;
2634 EMIT_ASM32 (i386_void_call_2_a
,
2635 /* Preserve %eax only; we don't have to worry about %ebx. */
2637 /* Reserve a bit of stack space for arguments. */
2638 "sub $0x10,%esp\n\t"
2639 /* Copy "top" to the second argument position. (Note that
2640 we can't assume function won't scribble on its
2641 arguments, so don't try to restore from this.) */
2642 "mov %eax,4(%esp)\n\t"
2643 "mov %ebx,8(%esp)");
2644 /* Put the first argument on the stack. */
2645 buildaddr
= current_insn_ptr
;
2647 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2650 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2652 append_insns (&buildaddr
, i
, buf
);
2653 current_insn_ptr
= buildaddr
;
2654 i386_emit_call (fn
);
2655 EMIT_ASM32 (i386_void_call_2_b
,
2656 "lea 0x10(%esp),%esp\n\t"
2657 /* Restore original stack top. */
2663 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2666 /* Check low half first, more likely to be decider */
2667 "cmpl %eax,(%esp)\n\t"
2668 "jne .Leq_fallthru\n\t"
2669 "cmpl %ebx,4(%esp)\n\t"
2670 "jne .Leq_fallthru\n\t"
2671 "lea 0x8(%esp),%esp\n\t"
2674 /* jmp, but don't trust the assembler to choose the right jump */
2675 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2676 ".Leq_fallthru:\n\t"
2677 "lea 0x8(%esp),%esp\n\t"
2688 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2691 /* Check low half first, more likely to be decider */
2692 "cmpl %eax,(%esp)\n\t"
2694 "cmpl %ebx,4(%esp)\n\t"
2695 "je .Lne_fallthru\n\t"
2697 "lea 0x8(%esp),%esp\n\t"
2700 /* jmp, but don't trust the assembler to choose the right jump */
2701 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2702 ".Lne_fallthru:\n\t"
2703 "lea 0x8(%esp),%esp\n\t"
2714 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2717 "cmpl %ebx,4(%esp)\n\t"
2719 "jne .Llt_fallthru\n\t"
2720 "cmpl %eax,(%esp)\n\t"
2721 "jnl .Llt_fallthru\n\t"
2723 "lea 0x8(%esp),%esp\n\t"
2726 /* jmp, but don't trust the assembler to choose the right jump */
2727 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2728 ".Llt_fallthru:\n\t"
2729 "lea 0x8(%esp),%esp\n\t"
2740 i386_emit_le_goto (int *offset_p
, int *size_p
)
2743 "cmpl %ebx,4(%esp)\n\t"
2745 "jne .Lle_fallthru\n\t"
2746 "cmpl %eax,(%esp)\n\t"
2747 "jnle .Lle_fallthru\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2752 /* jmp, but don't trust the assembler to choose the right jump */
2753 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2754 ".Lle_fallthru:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2766 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2769 "cmpl %ebx,4(%esp)\n\t"
2771 "jne .Lgt_fallthru\n\t"
2772 "cmpl %eax,(%esp)\n\t"
2773 "jng .Lgt_fallthru\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2778 /* jmp, but don't trust the assembler to choose the right jump */
2779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2780 ".Lgt_fallthru:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2792 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2795 "cmpl %ebx,4(%esp)\n\t"
2797 "jne .Lge_fallthru\n\t"
2798 "cmpl %eax,(%esp)\n\t"
2799 "jnge .Lge_fallthru\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2804 /* jmp, but don't trust the assembler to choose the right jump */
2805 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2806 ".Lge_fallthru:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2817 struct emit_ops i386_emit_ops
=
2825 i386_emit_rsh_signed
,
2826 i386_emit_rsh_unsigned
,
2834 i386_emit_less_signed
,
2835 i386_emit_less_unsigned
,
2839 i386_write_goto_address
,
2844 i386_emit_stack_flush
,
2847 i386_emit_stack_adjust
,
2848 i386_emit_int_call_1
,
2849 i386_emit_void_call_2
,
2859 static struct emit_ops
*
2863 if (is_64bit_tdesc ())
2864 return &amd64_emit_ops
;
2867 return &i386_emit_ops
;
2870 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2872 static const gdb_byte
*
2873 x86_sw_breakpoint_from_kind (int kind
, int *size
)
2875 *size
= x86_breakpoint_len
;
2876 return x86_breakpoint
;
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2894 /* This is initialized assuming an amd64 target.
2895 x86_arch_setup will correct it for i386 or amd64 targets. */
2897 struct linux_target_ops the_low_target
=
2900 x86_linux_regs_info
,
2901 x86_cannot_fetch_register
,
2902 x86_cannot_store_register
,
2903 NULL
, /* fetch_register */
2906 NULL
, /* breakpoint_kind_from_pc */
2907 x86_sw_breakpoint_from_kind
,
2911 x86_supports_z_point_type
,
2914 x86_stopped_by_watchpoint
,
2915 x86_stopped_data_address
,
2916 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2917 native i386 case (no registers smaller than an xfer unit), and are not
2918 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2921 /* need to fix up i386 siginfo if host is amd64 */
2923 x86_linux_new_process
,
2924 x86_linux_new_thread
,
2926 x86_linux_prepare_to_resume
,
2927 x86_linux_process_qsupported
,
2928 x86_supports_tracepoints
,
2929 x86_get_thread_area
,
2930 x86_install_fast_tracepoint_jump_pad
,
2932 x86_get_min_fast_tracepoint_insn_len
,
2933 x86_supports_range_stepping
,
2934 NULL
, /* breakpoint_kind_from_current_state */
2935 x86_supports_hardware_single_step
,
2936 x86_get_syscall_trapinfo
,
2940 initialize_low_arch (void)
2942 /* Initialize the Linux target descriptions. */
2944 init_registers_amd64_linux ();
2945 init_registers_amd64_avx_linux ();
2946 init_registers_amd64_avx512_linux ();
2947 init_registers_amd64_mpx_linux ();
2949 init_registers_x32_linux ();
2950 init_registers_x32_avx_linux ();
2951 init_registers_x32_avx512_linux ();
2953 tdesc_amd64_linux_no_xml
= XNEW (struct target_desc
);
2954 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
2955 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
2957 init_registers_i386_linux ();
2958 init_registers_i386_mmx_linux ();
2959 init_registers_i386_avx_linux ();
2960 init_registers_i386_avx512_linux ();
2961 init_registers_i386_mpx_linux ();
2963 tdesc_i386_linux_no_xml
= XNEW (struct target_desc
);
2964 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
2965 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
2967 initialize_regsets_info (&x86_regsets_info
);