/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                              /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
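/* Clarifying note (not in the original sources): each regset_info entry
   above pairs a ptrace "get"/"set" request with the fill/store callbacks
   that translate between the ptrace buffer layout and the regcache.  The
   NT_X86_XSTATE entry starts with size 0; its real size is filled in by
   x86_linux_read_description once XCR0 is known.  */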
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */
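/* Example (added for clarity, not in the original sources): when GDB
   writes $_siginfo into a 32-bit inferior from a 64-bit gdbserver,
   DIRECTION is 1 and the 32-bit layout at INF is converted into the
   64-bit siginfo_t that PTRACE_SETSIGINFO expects; reading $_siginfo
   goes the other way with DIRECTION == 0.  */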
bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  extended state regions (AVX and beyond)
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
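/* Illustration (added for clarity, not in the original sources): given
   an XSAVE buffer fetched with PTRACE_GETREGSET/NT_X86_XSTATE, the
   enabled-feature mask is read as

     xcr0 = ((uint64_t *) xsave_buf)[I386_LINUX_XSAVE_XCR0_OFFSET
				     / sizeof (uint64_t)];

   which is exactly what x86_linux_read_description does below.  */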
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}
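/* Example (added for clarity, not in the original sources): a GDB that
   understands x86 XML target descriptions sends something like
   "xmlRegisters=i386" (possibly as part of a longer comma-separated
   list) in its qSupported packet; only then is use_xml set and a full
   XML description handed out by x86_linux_read_description.  */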
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}
/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
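/* Outline of the generated amd64 pad (added for clarity, not in the
   original sources): push all general-purpose registers, the flags and
   the tracepoint address, reserve a collecting_t object on the stack,
   take the spin lock at LOCKADDR with lock cmpxchg, call the collector,
   drop the lock, restore the registers, execute the relocated original
   instruction, and finally jump back to TPADDR + ORIG_SIZE.  */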
static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
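/* Note (added for clarity, not in the original sources): the i386
   variant below mirrors the amd64 one, but when the instruction being
   traced is only 4 bytes long it cannot hold a 5-byte jump, so a
   trampoline is claimed and reached through a 2-byte-offset jump
   (small_jump_insn) instead.  */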
static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
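/* Usage sketch (added for clarity, not in the original sources;
   "amd64_example" is a made-up label):

     EMIT_ASM (amd64_example, "pop %rax\n\t" "push %rcx");

   assembles the instructions into this very function between the
   start_/end_ labels, then add_insns copies those bytes into the
   compiled-bytecode buffer at current_insn_ptr; the surrounding jmp
   keeps gdbserver itself from executing them.  */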
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


struct emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}