1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
42 #include "nat/x86-linux-dregs.h"
#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;
#endif
/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;
/* Target descriptions used when the GDB on the other end does not
   support XML target descriptions (filled in lazily).  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

/* Instruction templates for relocated jumps written by the fast
   tracepoint jump-pad code (32-bit displacement and 16-bit
   displacement forms; the displacement bytes are patched in).  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
119 #include <sys/procfs.h>
120 #include <sys/ptrace.h>
123 #ifndef PTRACE_GETREGSET
124 #define PTRACE_GETREGSET 0x4204
127 #ifndef PTRACE_SETREGSET
128 #define PTRACE_SETREGSET 0x4205
132 #ifndef PTRACE_GET_THREAD_AREA
133 #define PTRACE_GET_THREAD_AREA 25
136 /* This definition comes from prctl.h, but some kernels may not have it. */
137 #ifndef PTRACE_ARCH_PRCTL
138 #define PTRACE_ARCH_PRCTL 30
141 /* The following definitions come from prctl.h, but may be absent
142 for certain configurations. */
144 #define ARCH_SET_GS 0x1001
145 #define ARCH_SET_FS 0x1002
146 #define ARCH_GET_FS 0x1003
147 #define ARCH_GET_GS 0x1004
150 /* Per-process arch-specific data we want to keep. */
152 struct arch_process_info
154 struct x86_debug_reg_state debug_reg_state
;
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout.
161 Note that the transfer layout uses 64-bit regs. */
162 static /*const*/ int i386_regmap
[] =
164 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
165 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
166 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
167 DS
* 8, ES
* 8, FS
* 8, GS
* 8
170 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
172 /* So code below doesn't have to care, i386 or amd64. */
173 #define ORIG_EAX ORIG_RAX
176 static const int x86_64_regmap
[] =
178 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
179 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
180 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
181 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
182 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
183 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1, -1, -1, -1, -1, -1, -1, -1,
188 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
191 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
192 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
193 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
195 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1
203 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
204 #define X86_64_USER_REGS (GS + 1)
206 #else /* ! __x86_64__ */
208 /* Mapping between the general-purpose registers in `struct user'
209 format and GDB's register array layout. */
210 static /*const*/ int i386_regmap
[] =
212 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
213 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
214 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
215 DS
* 4, ES
* 4, FS
* 4, GS
* 4
218 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
226 /* Returns true if the current inferior belongs to a x86-64 process,
230 is_64bit_tdesc (void)
232 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
234 return register_size (regcache
->tdesc
, 0) == 8;
240 /* Called by libthread_db. */
243 ps_get_thread_area (const struct ps_prochandle
*ph
,
244 lwpid_t lwpid
, int idx
, void **base
)
247 int use_64bit
= is_64bit_tdesc ();
254 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
258 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
269 unsigned int desc
[4];
271 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
272 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
275 /* Ensure we properly extend the value to 64-bits for x86_64. */
276 *base
= (void *) (uintptr_t) desc
[1];
281 /* Get the thread area address. This is used to recognize which
282 thread is which when tracing with the in-process agent library. We
283 don't read anything from the address, and treat it as opaque; it's
284 the address itself that we assume is unique per-thread. */
287 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
290 int use_64bit
= is_64bit_tdesc ();
295 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
297 *addr
= (CORE_ADDR
) (uintptr_t) base
;
306 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
307 struct thread_info
*thr
= get_lwp_thread (lwp
);
308 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
309 unsigned int desc
[4];
311 const int reg_thread_area
= 3; /* bits to scale down register value. */
314 collect_register_by_name (regcache
, "gs", &gs
);
316 idx
= gs
>> reg_thread_area
;
318 if (ptrace (PTRACE_GET_THREAD_AREA
,
320 (void *) (long) idx
, (unsigned long) &desc
) < 0)
331 x86_cannot_store_register (int regno
)
334 if (is_64bit_tdesc ())
338 return regno
>= I386_NUM_REGS
;
342 x86_cannot_fetch_register (int regno
)
345 if (is_64bit_tdesc ())
349 return regno
>= I386_NUM_REGS
;
353 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
358 if (register_size (regcache
->tdesc
, 0) == 8)
360 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
361 if (x86_64_regmap
[i
] != -1)
362 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
366 /* 32-bit inferior registers need to be zero-extended.
367 Callers would read uninitialized memory otherwise. */
368 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
371 for (i
= 0; i
< I386_NUM_REGS
; i
++)
372 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
374 collect_register_by_name (regcache
, "orig_eax",
375 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
379 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
384 if (register_size (regcache
->tdesc
, 0) == 8)
386 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
387 if (x86_64_regmap
[i
] != -1)
388 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
393 for (i
= 0; i
< I386_NUM_REGS
; i
++)
394 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
396 supply_register_by_name (regcache
, "orig_eax",
397 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Copy the i387 FP registers from REGCACHE into BUF in the layout
   expected by PTRACE_SETFPREGS (fxsave on 64-bit, fsave on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

/* Copy the i387 FP registers from BUF into REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
#ifndef __x86_64__

/* Copy the extended (SSE) FP registers between REGCACHE and the
   PTRACE_GETFPXREGS-layout buffer BUF; 32-bit only — on 64-bit the
   fxsave layout is handled by the plain fpregset functions.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
/* Copy the XSAVE extended state between REGCACHE and the
   PTRACE_GETREGSET/NT_X86_XSTATE buffer BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
448 /* ??? The non-biarch i386 case stores all the i387 regs twice.
449 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
450 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
451 doesn't work. IWBN to avoid the duplication in the case where it
452 does work. Maybe the arch_setup routine could check whether it works
453 and update the supported regsets accordingly. */
455 static struct regset_info x86_regsets
[] =
457 #ifdef HAVE_PTRACE_GETREGS
458 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
460 x86_fill_gregset
, x86_store_gregset
},
461 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
462 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
464 # ifdef HAVE_PTRACE_GETFPXREGS
465 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
467 x86_fill_fpxregset
, x86_store_fpxregset
},
470 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
472 x86_fill_fpregset
, x86_store_fpregset
},
473 #endif /* HAVE_PTRACE_GETREGS */
474 { 0, 0, 0, -1, -1, NULL
, NULL
}
478 x86_get_pc (struct regcache
*regcache
)
480 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
485 collect_register_by_name (regcache
, "rip", &pc
);
486 return (CORE_ADDR
) pc
;
491 collect_register_by_name (regcache
, "eip", &pc
);
492 return (CORE_ADDR
) pc
;
497 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
499 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
503 unsigned long newpc
= pc
;
504 supply_register_by_name (regcache
, "rip", &newpc
);
508 unsigned int newpc
= pc
;
509 supply_register_by_name (regcache
, "eip", &newpc
);
513 static const unsigned char x86_breakpoint
[] = { 0xCC };
514 #define x86_breakpoint_len 1
517 x86_breakpoint_at (CORE_ADDR pc
)
521 (*the_target
->read_memory
) (pc
, &c
, 1);
528 /* Low-level function vector. */
529 struct x86_dr_low_type x86_dr_low
=
531 x86_linux_dr_set_control
,
532 x86_linux_dr_set_addr
,
533 x86_linux_dr_get_addr
,
534 x86_linux_dr_get_status
,
535 x86_linux_dr_get_control
,
539 /* Breakpoint/Watchpoint support. */
542 x86_supports_z_point_type (char z_type
)
548 case Z_PACKET_WRITE_WP
:
549 case Z_PACKET_ACCESS_WP
:
557 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
558 int size
, struct raw_breakpoint
*bp
)
560 struct process_info
*proc
= current_process ();
564 case raw_bkpt_type_sw
:
565 return insert_memory_breakpoint (bp
);
567 case raw_bkpt_type_hw
:
568 case raw_bkpt_type_write_wp
:
569 case raw_bkpt_type_access_wp
:
571 enum target_hw_bp_type hw_type
572 = raw_bkpt_type_to_target_hw_bp_type (type
);
573 struct x86_debug_reg_state
*state
574 = &proc
->priv
->arch_private
->debug_reg_state
;
576 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
586 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
587 int size
, struct raw_breakpoint
*bp
)
589 struct process_info
*proc
= current_process ();
593 case raw_bkpt_type_sw
:
594 return remove_memory_breakpoint (bp
);
596 case raw_bkpt_type_hw
:
597 case raw_bkpt_type_write_wp
:
598 case raw_bkpt_type_access_wp
:
600 enum target_hw_bp_type hw_type
601 = raw_bkpt_type_to_target_hw_bp_type (type
);
602 struct x86_debug_reg_state
*state
603 = &proc
->priv
->arch_private
->debug_reg_state
;
605 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
614 x86_stopped_by_watchpoint (void)
616 struct process_info
*proc
= current_process ();
617 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
621 x86_stopped_data_address (void)
623 struct process_info
*proc
= current_process ();
625 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
631 /* Called when a new process is created. */
633 static struct arch_process_info
*
634 x86_linux_new_process (void)
636 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
638 x86_low_init_dregs (&info
->debug_reg_state
);
643 /* Called when a new thread is detected. */
646 x86_linux_new_thread (struct lwp_info
*lwp
)
648 lwp_set_debug_registers_changed (lwp
, 1);
651 /* See nat/x86-dregs.h. */
653 struct x86_debug_reg_state
*
654 x86_debug_reg_state (pid_t pid
)
656 struct process_info
*proc
= find_process_pid (pid
);
658 return &proc
->priv
->arch_private
->debug_reg_state
;
661 /* Called prior to resuming a thread. */
664 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
666 x86_linux_update_debug_registers (lwp
);
669 /* When GDBSERVER is built as a 64-bit application on linux, the
670 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
671 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
672 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
673 conversion in-place ourselves. */
675 /* These types below (compat_*) define a siginfo type that is layout
676 compatible with the siginfo type exported by the 32-bit userspace
681 typedef int compat_int_t
;
682 typedef unsigned int compat_uptr_t
;
684 typedef int compat_time_t
;
685 typedef int compat_timer_t
;
686 typedef int compat_clock_t
;
688 struct compat_timeval
690 compat_time_t tv_sec
;
694 typedef union compat_sigval
696 compat_int_t sival_int
;
697 compat_uptr_t sival_ptr
;
700 typedef struct compat_siginfo
708 int _pad
[((128 / sizeof (int)) - 3)];
717 /* POSIX.1b timers */
722 compat_sigval_t _sigval
;
725 /* POSIX.1b signals */
730 compat_sigval_t _sigval
;
739 compat_clock_t _utime
;
740 compat_clock_t _stime
;
743 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
758 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
759 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
761 typedef struct compat_x32_siginfo
769 int _pad
[((128 / sizeof (int)) - 3)];
778 /* POSIX.1b timers */
783 compat_sigval_t _sigval
;
786 /* POSIX.1b signals */
791 compat_sigval_t _sigval
;
800 compat_x32_clock_t _utime
;
801 compat_x32_clock_t _stime
;
804 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
817 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
/* Accessor shorthands for the compat union members above.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
841 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
843 memset (to
, 0, sizeof (*to
));
845 to
->si_signo
= from
->si_signo
;
846 to
->si_errno
= from
->si_errno
;
847 to
->si_code
= from
->si_code
;
849 if (to
->si_code
== SI_TIMER
)
851 to
->cpt_si_timerid
= from
->si_timerid
;
852 to
->cpt_si_overrun
= from
->si_overrun
;
853 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
855 else if (to
->si_code
== SI_USER
)
857 to
->cpt_si_pid
= from
->si_pid
;
858 to
->cpt_si_uid
= from
->si_uid
;
860 else if (to
->si_code
< 0)
862 to
->cpt_si_pid
= from
->si_pid
;
863 to
->cpt_si_uid
= from
->si_uid
;
864 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
868 switch (to
->si_signo
)
871 to
->cpt_si_pid
= from
->si_pid
;
872 to
->cpt_si_uid
= from
->si_uid
;
873 to
->cpt_si_status
= from
->si_status
;
874 to
->cpt_si_utime
= from
->si_utime
;
875 to
->cpt_si_stime
= from
->si_stime
;
881 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
884 to
->cpt_si_band
= from
->si_band
;
885 to
->cpt_si_fd
= from
->si_fd
;
888 to
->cpt_si_pid
= from
->si_pid
;
889 to
->cpt_si_uid
= from
->si_uid
;
890 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
897 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
899 memset (to
, 0, sizeof (*to
));
901 to
->si_signo
= from
->si_signo
;
902 to
->si_errno
= from
->si_errno
;
903 to
->si_code
= from
->si_code
;
905 if (to
->si_code
== SI_TIMER
)
907 to
->si_timerid
= from
->cpt_si_timerid
;
908 to
->si_overrun
= from
->cpt_si_overrun
;
909 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
911 else if (to
->si_code
== SI_USER
)
913 to
->si_pid
= from
->cpt_si_pid
;
914 to
->si_uid
= from
->cpt_si_uid
;
916 else if (to
->si_code
< 0)
918 to
->si_pid
= from
->cpt_si_pid
;
919 to
->si_uid
= from
->cpt_si_uid
;
920 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
924 switch (to
->si_signo
)
927 to
->si_pid
= from
->cpt_si_pid
;
928 to
->si_uid
= from
->cpt_si_uid
;
929 to
->si_status
= from
->cpt_si_status
;
930 to
->si_utime
= from
->cpt_si_utime
;
931 to
->si_stime
= from
->cpt_si_stime
;
937 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
940 to
->si_band
= from
->cpt_si_band
;
941 to
->si_fd
= from
->cpt_si_fd
;
944 to
->si_pid
= from
->cpt_si_pid
;
945 to
->si_uid
= from
->cpt_si_uid
;
946 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
953 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
956 memset (to
, 0, sizeof (*to
));
958 to
->si_signo
= from
->si_signo
;
959 to
->si_errno
= from
->si_errno
;
960 to
->si_code
= from
->si_code
;
962 if (to
->si_code
== SI_TIMER
)
964 to
->cpt_si_timerid
= from
->si_timerid
;
965 to
->cpt_si_overrun
= from
->si_overrun
;
966 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
968 else if (to
->si_code
== SI_USER
)
970 to
->cpt_si_pid
= from
->si_pid
;
971 to
->cpt_si_uid
= from
->si_uid
;
973 else if (to
->si_code
< 0)
975 to
->cpt_si_pid
= from
->si_pid
;
976 to
->cpt_si_uid
= from
->si_uid
;
977 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
981 switch (to
->si_signo
)
984 to
->cpt_si_pid
= from
->si_pid
;
985 to
->cpt_si_uid
= from
->si_uid
;
986 to
->cpt_si_status
= from
->si_status
;
987 to
->cpt_si_utime
= from
->si_utime
;
988 to
->cpt_si_stime
= from
->si_stime
;
994 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
997 to
->cpt_si_band
= from
->si_band
;
998 to
->cpt_si_fd
= from
->si_fd
;
1001 to
->cpt_si_pid
= from
->si_pid
;
1002 to
->cpt_si_uid
= from
->si_uid
;
1003 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1010 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1011 compat_x32_siginfo_t
*from
)
1013 memset (to
, 0, sizeof (*to
));
1015 to
->si_signo
= from
->si_signo
;
1016 to
->si_errno
= from
->si_errno
;
1017 to
->si_code
= from
->si_code
;
1019 if (to
->si_code
== SI_TIMER
)
1021 to
->si_timerid
= from
->cpt_si_timerid
;
1022 to
->si_overrun
= from
->cpt_si_overrun
;
1023 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1025 else if (to
->si_code
== SI_USER
)
1027 to
->si_pid
= from
->cpt_si_pid
;
1028 to
->si_uid
= from
->cpt_si_uid
;
1030 else if (to
->si_code
< 0)
1032 to
->si_pid
= from
->cpt_si_pid
;
1033 to
->si_uid
= from
->cpt_si_uid
;
1034 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1038 switch (to
->si_signo
)
1041 to
->si_pid
= from
->cpt_si_pid
;
1042 to
->si_uid
= from
->cpt_si_uid
;
1043 to
->si_status
= from
->cpt_si_status
;
1044 to
->si_utime
= from
->cpt_si_utime
;
1045 to
->si_stime
= from
->cpt_si_stime
;
1051 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1054 to
->si_band
= from
->cpt_si_band
;
1055 to
->si_fd
= from
->cpt_si_fd
;
1058 to
->si_pid
= from
->cpt_si_pid
;
1059 to
->si_uid
= from
->cpt_si_uid
;
1060 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1066 #endif /* __x86_64__ */
1068 /* Convert a native/host siginfo object, into/from the siginfo in the
1069 layout of the inferiors' architecture. Returns true if any
1070 conversion was done; false otherwise. If DIRECTION is 1, then copy
1071 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1075 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1078 unsigned int machine
;
1079 int tid
= lwpid_of (current_thread
);
1080 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1082 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1083 if (!is_64bit_tdesc ())
1085 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1088 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1090 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1094 /* No fixup for native x32 GDB. */
1095 else if (!is_elf64
&& sizeof (void *) == 8)
1097 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1100 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1103 siginfo_from_compat_x32_siginfo (native
,
1104 (struct compat_x32_siginfo
*) inf
);
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;
1150 /* Get Linux/x86 target description from running target. */
1152 static const struct target_desc
*
1153 x86_linux_read_description (void)
1155 unsigned int machine
;
1159 static uint64_t xcr0
;
1160 struct regset_info
*regset
;
1162 tid
= lwpid_of (current_thread
);
1164 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1166 if (sizeof (void *) == 4)
1169 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1171 else if (machine
== EM_X86_64
)
1172 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1176 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1177 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1179 elf_fpxregset_t fpxregs
;
1181 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1183 have_ptrace_getfpxregs
= 0;
1184 have_ptrace_getregset
= 0;
1185 return tdesc_i386_mmx_linux
;
1188 have_ptrace_getfpxregs
= 1;
1194 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1196 /* Don't use XML. */
1198 if (machine
== EM_X86_64
)
1199 return tdesc_amd64_linux_no_xml
;
1202 return tdesc_i386_linux_no_xml
;
1205 if (have_ptrace_getregset
== -1)
1207 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1210 iov
.iov_base
= xstateregs
;
1211 iov
.iov_len
= sizeof (xstateregs
);
1213 /* Check if PTRACE_GETREGSET works. */
1214 if (ptrace (PTRACE_GETREGSET
, tid
,
1215 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1216 have_ptrace_getregset
= 0;
1219 have_ptrace_getregset
= 1;
1221 /* Get XCR0 from XSAVE extended state. */
1222 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1223 / sizeof (uint64_t))];
1225 /* Use PTRACE_GETREGSET if it is available. */
1226 for (regset
= x86_regsets
;
1227 regset
->fill_function
!= NULL
; regset
++)
1228 if (regset
->get_request
== PTRACE_GETREGSET
)
1229 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1230 else if (regset
->type
!= GENERAL_REGS
)
1235 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1236 xcr0_features
= (have_ptrace_getregset
1237 && (xcr0
& X86_XSTATE_ALL_MASK
));
1242 if (machine
== EM_X86_64
)
1249 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1251 case X86_XSTATE_AVX512_MASK
:
1252 return tdesc_amd64_avx512_linux
;
1254 case X86_XSTATE_MPX_MASK
:
1255 return tdesc_amd64_mpx_linux
;
1257 case X86_XSTATE_AVX_MASK
:
1258 return tdesc_amd64_avx_linux
;
1261 return tdesc_amd64_linux
;
1265 return tdesc_amd64_linux
;
1271 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1273 case X86_XSTATE_AVX512_MASK
:
1274 return tdesc_x32_avx512_linux
;
1276 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1277 case X86_XSTATE_AVX_MASK
:
1278 return tdesc_x32_avx_linux
;
1281 return tdesc_x32_linux
;
1285 return tdesc_x32_linux
;
1293 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1295 case (X86_XSTATE_AVX512_MASK
):
1296 return tdesc_i386_avx512_linux
;
1298 case (X86_XSTATE_MPX_MASK
):
1299 return tdesc_i386_mpx_linux
;
1301 case (X86_XSTATE_AVX_MASK
):
1302 return tdesc_i386_avx_linux
;
1305 return tdesc_i386_linux
;
1309 return tdesc_i386_linux
;
1312 gdb_assert_not_reached ("failed to return tdesc");
1315 /* Callback for find_inferior. Stops iteration when a thread with a
1316 given PID is found. */
1319 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1321 int pid
= *(int *) data
;
1323 return (ptid_get_pid (entry
->id
) == pid
);
1326 /* Callback for for_each_inferior. Calls the arch_setup routine for
1330 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1332 int pid
= ptid_get_pid (entry
->id
);
1334 /* Look up any thread of this processes. */
1336 = (struct thread_info
*) find_inferior (&all_threads
,
1337 same_process_callback
, &pid
);
1339 the_low_target
.arch_setup ();
1342 /* Update all the target description of all processes; a new GDB
1343 connected, and it may or not support xml target descriptions. */
1346 x86_linux_update_xmltarget (void)
1348 struct thread_info
*saved_thread
= current_thread
;
1350 /* Before changing the register cache's internal layout, flush the
1351 contents of the current valid caches back to the threads, and
1352 release the current regcache objects. */
1353 regcache_release ();
1355 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1357 current_thread
= saved_thread
;
1360 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1361 PTRACE_GETREGSET. */
1364 x86_linux_process_qsupported (const char *query
)
1366 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1367 with "i386" in qSupported query, it supports x86 XML target
1370 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1372 char *copy
= xstrdup (query
+ 13);
1375 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1377 if (strcmp (p
, "i386") == 0)
1387 x86_linux_update_xmltarget ();
1390 /* Common for x86/x86-64. */
1392 static struct regsets_info x86_regsets_info
=
1394 x86_regsets
, /* regsets */
1395 0, /* num_regsets */
1396 NULL
, /* disabled_regsets */
1400 static struct regs_info amd64_linux_regs_info
=
1402 NULL
, /* regset_bitmap */
1403 NULL
, /* usrregs_info */
1407 static struct usrregs_info i386_linux_usrregs_info
=
1413 static struct regs_info i386_linux_regs_info
=
1415 NULL
, /* regset_bitmap */
1416 &i386_linux_usrregs_info
,
1420 const struct regs_info
*
1421 x86_linux_regs_info (void)
1424 if (is_64bit_tdesc ())
1425 return &amd64_linux_regs_info
;
1428 return &i386_linux_regs_info
;
1431 /* Initialize the target description for the architecture of the
1435 x86_arch_setup (void)
1437 current_process ()->tdesc
= x86_linux_read_description ();
/* Tracepoints are always supported on x86/x86-64.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1447 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1449 write_inferior_memory (*to
, buf
, len
);
/* Decode a string OP of space-separated hexadecimal byte values into
   BUF.  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* Stop at the first token that is not a hex number.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1475 /* Build a jump pad that saves registers and calls a collection
1476 function. Writes a jump instruction to the jump pad to
1477 JJUMPAD_INSN. The caller is responsible to write it in at the
1478 tracepoint address. */
1481 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1482 CORE_ADDR collector
,
1485 CORE_ADDR
*jump_entry
,
1486 CORE_ADDR
*trampoline
,
1487 ULONGEST
*trampoline_size
,
1488 unsigned char *jjump_pad_insn
,
1489 ULONGEST
*jjump_pad_insn_size
,
1490 CORE_ADDR
*adjusted_insn_addr
,
1491 CORE_ADDR
*adjusted_insn_addr_end
,
1494 unsigned char buf
[40];
1498 CORE_ADDR buildaddr
= *jump_entry
;
1500 /* Build the jump pad. */
1502 /* First, do tracepoint data collection. Save registers. */
1504 /* Need to ensure stack pointer saved first. */
1505 buf
[i
++] = 0x54; /* push %rsp */
1506 buf
[i
++] = 0x55; /* push %rbp */
1507 buf
[i
++] = 0x57; /* push %rdi */
1508 buf
[i
++] = 0x56; /* push %rsi */
1509 buf
[i
++] = 0x52; /* push %rdx */
1510 buf
[i
++] = 0x51; /* push %rcx */
1511 buf
[i
++] = 0x53; /* push %rbx */
1512 buf
[i
++] = 0x50; /* push %rax */
1513 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1514 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1515 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1516 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1517 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1518 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1519 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1520 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1521 buf
[i
++] = 0x9c; /* pushfq */
1522 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1524 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1525 i
+= sizeof (unsigned long);
1526 buf
[i
++] = 0x57; /* push %rdi */
1527 append_insns (&buildaddr
, i
, buf
);
1529 /* Stack space for the collecting_t object. */
1531 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1532 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1533 memcpy (buf
+ i
, &tpoint
, 8);
1535 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1536 i
+= push_opcode (&buf
[i
],
1537 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1538 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1539 append_insns (&buildaddr
, i
, buf
);
1543 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1544 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1546 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1547 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1548 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1549 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1550 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1551 append_insns (&buildaddr
, i
, buf
);
1553 /* Set up the gdb_collect call. */
1554 /* At this point, (stack pointer + 0x18) is the base of our saved
1558 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1559 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1561 /* tpoint address may be 64-bit wide. */
1562 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1563 memcpy (buf
+ i
, &tpoint
, 8);
1565 append_insns (&buildaddr
, i
, buf
);
1567 /* The collector function being in the shared library, may be
1568 >31-bits away off the jump pad. */
1570 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1571 memcpy (buf
+ i
, &collector
, 8);
1573 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1574 append_insns (&buildaddr
, i
, buf
);
1576 /* Clear the spin-lock. */
1578 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1579 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1580 memcpy (buf
+ i
, &lockaddr
, 8);
1582 append_insns (&buildaddr
, i
, buf
);
1584 /* Remove stack that had been used for the collect_t object. */
1586 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1587 append_insns (&buildaddr
, i
, buf
);
1589 /* Restore register state. */
1591 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1595 buf
[i
++] = 0x9d; /* popfq */
1596 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1597 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1598 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1599 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1600 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1601 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1602 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1603 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1604 buf
[i
++] = 0x58; /* pop %rax */
1605 buf
[i
++] = 0x5b; /* pop %rbx */
1606 buf
[i
++] = 0x59; /* pop %rcx */
1607 buf
[i
++] = 0x5a; /* pop %rdx */
1608 buf
[i
++] = 0x5e; /* pop %rsi */
1609 buf
[i
++] = 0x5f; /* pop %rdi */
1610 buf
[i
++] = 0x5d; /* pop %rbp */
1611 buf
[i
++] = 0x5c; /* pop %rsp */
1612 append_insns (&buildaddr
, i
, buf
);
1614 /* Now, adjust the original instruction to execute in the jump
1616 *adjusted_insn_addr
= buildaddr
;
1617 relocate_instruction (&buildaddr
, tpaddr
);
1618 *adjusted_insn_addr_end
= buildaddr
;
1620 /* Finally, write a jump back to the program. */
1622 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1623 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1626 "E.Jump back from jump pad too far from tracepoint "
1627 "(offset 0x%" PRIx64
" > int32).", loffset
);
1631 offset
= (int) loffset
;
1632 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1633 memcpy (buf
+ 1, &offset
, 4);
1634 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1636 /* The jump pad is now built. Wire in a jump to our jump pad. This
1637 is always done last (by our caller actually), so that we can
1638 install fast tracepoints with threads running. This relies on
1639 the agent's atomic write support. */
1640 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1641 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1644 "E.Jump pad too far from tracepoint "
1645 "(offset 0x%" PRIx64
" > int32).", loffset
);
1649 offset
= (int) loffset
;
1651 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1652 memcpy (buf
+ 1, &offset
, 4);
1653 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1654 *jjump_pad_insn_size
= sizeof (jump_insn
);
1656 /* Return the end address of our pad. */
1657 *jump_entry
= buildaddr
;
1662 #endif /* __x86_64__ */
1664 /* Build a jump pad that saves registers and calls a collection
1665 function. Writes a jump instruction to the jump pad to
1666 JJUMPAD_INSN. The caller is responsible to write it in at the
1667 tracepoint address. */
1670 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1671 CORE_ADDR collector
,
1674 CORE_ADDR
*jump_entry
,
1675 CORE_ADDR
*trampoline
,
1676 ULONGEST
*trampoline_size
,
1677 unsigned char *jjump_pad_insn
,
1678 ULONGEST
*jjump_pad_insn_size
,
1679 CORE_ADDR
*adjusted_insn_addr
,
1680 CORE_ADDR
*adjusted_insn_addr_end
,
1683 unsigned char buf
[0x100];
1685 CORE_ADDR buildaddr
= *jump_entry
;
1687 /* Build the jump pad. */
1689 /* First, do tracepoint data collection. Save registers. */
1691 buf
[i
++] = 0x60; /* pushad */
1692 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1693 *((int *)(buf
+ i
)) = (int) tpaddr
;
1695 buf
[i
++] = 0x9c; /* pushf */
1696 buf
[i
++] = 0x1e; /* push %ds */
1697 buf
[i
++] = 0x06; /* push %es */
1698 buf
[i
++] = 0x0f; /* push %fs */
1700 buf
[i
++] = 0x0f; /* push %gs */
1702 buf
[i
++] = 0x16; /* push %ss */
1703 buf
[i
++] = 0x0e; /* push %cs */
1704 append_insns (&buildaddr
, i
, buf
);
1706 /* Stack space for the collecting_t object. */
1708 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1710 /* Build the object. */
1711 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1712 memcpy (buf
+ i
, &tpoint
, 4);
1714 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1716 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1717 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1718 append_insns (&buildaddr
, i
, buf
);
1720 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1721 If we cared for it, this could be using xchg alternatively. */
1724 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1725 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1727 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1729 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1730 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1731 append_insns (&buildaddr
, i
, buf
);
1734 /* Set up arguments to the gdb_collect call. */
1736 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1737 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1738 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1739 append_insns (&buildaddr
, i
, buf
);
1742 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1743 append_insns (&buildaddr
, i
, buf
);
1746 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1747 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1749 append_insns (&buildaddr
, i
, buf
);
1751 buf
[0] = 0xe8; /* call <reladdr> */
1752 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1753 memcpy (buf
+ 1, &offset
, 4);
1754 append_insns (&buildaddr
, 5, buf
);
1755 /* Clean up after the call. */
1756 buf
[0] = 0x83; /* add $0x8,%esp */
1759 append_insns (&buildaddr
, 3, buf
);
1762 /* Clear the spin-lock. This would need the LOCK prefix on older
1765 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1766 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1767 memcpy (buf
+ i
, &lockaddr
, 4);
1769 append_insns (&buildaddr
, i
, buf
);
1772 /* Remove stack that had been used for the collect_t object. */
1774 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1775 append_insns (&buildaddr
, i
, buf
);
1778 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1781 buf
[i
++] = 0x17; /* pop %ss */
1782 buf
[i
++] = 0x0f; /* pop %gs */
1784 buf
[i
++] = 0x0f; /* pop %fs */
1786 buf
[i
++] = 0x07; /* pop %es */
1787 buf
[i
++] = 0x1f; /* pop %ds */
1788 buf
[i
++] = 0x9d; /* popf */
1789 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1792 buf
[i
++] = 0x61; /* popad */
1793 append_insns (&buildaddr
, i
, buf
);
1795 /* Now, adjust the original instruction to execute in the jump
1797 *adjusted_insn_addr
= buildaddr
;
1798 relocate_instruction (&buildaddr
, tpaddr
);
1799 *adjusted_insn_addr_end
= buildaddr
;
1801 /* Write the jump back to the program. */
1802 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1803 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1804 memcpy (buf
+ 1, &offset
, 4);
1805 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1807 /* The jump pad is now built. Wire in a jump to our jump pad. This
1808 is always done last (by our caller actually), so that we can
1809 install fast tracepoints with threads running. This relies on
1810 the agent's atomic write support. */
1813 /* Create a trampoline. */
1814 *trampoline_size
= sizeof (jump_insn
);
1815 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1817 /* No trampoline space available. */
1819 "E.Cannot allocate trampoline space needed for fast "
1820 "tracepoints on 4-byte instructions.");
1824 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1825 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1826 memcpy (buf
+ 1, &offset
, 4);
1827 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1829 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1830 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1831 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1832 memcpy (buf
+ 2, &offset
, 2);
1833 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1834 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1838 /* Else use a 32-bit relative jump instruction. */
1839 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1840 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1841 memcpy (buf
+ 1, &offset
, 4);
1842 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1843 *jjump_pad_insn_size
= sizeof (jump_insn
);
1846 /* Return the end address of our pad. */
1847 *jump_entry
= buildaddr
;
/* NOTE(review): this span is extraction-garbled -- tokens are split
   across lines and several original lines (return type, the
   lockaddr/orig_size parameter line, jjump_pad_insn argument lines,
   the closing brace) were dropped by the extractor.  Code tokens
   below are reproduced byte-for-byte; only comments were added.
   Restore from the upstream file before compiling.  */
/* Biarch dispatcher: forwards all jump-pad-construction arguments to
   the 64-bit builder when the current target description is 64-bit
   (is_64bit_tdesc), otherwise to the 32-bit builder.  */
1853 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1854 CORE_ADDR collector
,
1857 CORE_ADDR
*jump_entry
,
1858 CORE_ADDR
*trampoline
,
1859 ULONGEST
*trampoline_size
,
1860 unsigned char *jjump_pad_insn
,
1861 ULONGEST
*jjump_pad_insn_size
,
1862 CORE_ADDR
*adjusted_insn_addr
,
1863 CORE_ADDR
*adjusted_insn_addr_end
,
/* 64-bit inferior: use the amd64 jump-pad builder.  */
1867 if (is_64bit_tdesc ())
1868 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1869 collector
, lockaddr
,
1870 orig_size
, jump_entry
,
1871 trampoline
, trampoline_size
,
1873 jjump_pad_insn_size
,
1875 adjusted_insn_addr_end
,
/* Otherwise fall back to the i386 builder with the same arguments.  */
1879 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1880 collector
, lockaddr
,
1881 orig_size
, jump_entry
,
1882 trampoline
, trampoline_size
,
1884 jjump_pad_insn_size
,
1886 adjusted_insn_addr_end
,
/* NOTE(review): extraction-garbled span -- some original lines
   (return type, braces, the literal return statements with the
   4/5-byte lengths) were dropped.  Tokens below are byte-identical
   to the extracted text; only comments were added.  */
1890 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1894 x86_get_min_fast_tracepoint_insn_len (void)
/* Warn only once per gdbserver session (static flag persists across
   calls).  */
1896 static int warned_about_fast_tracepoints
= 0;
1899 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1900 used for fast tracepoints. */
1901 if (is_64bit_tdesc ())
/* 32-bit case: the answer depends on whether the in-process agent
   (IPA) has been loaded and whether it has a trampoline buffer.  */
1905 if (agent_loaded_p ())
1907 char errbuf
[IPA_BUFSIZ
];
1911 /* On x86, if trampolines are available, then 4-byte jump instructions
1912 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1913 with a 4-byte offset are used instead. */
1914 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1918 /* GDB has no channel to explain to user why a shorter fast
1919 tracepoint is not possible, but at least make GDBserver
1920 mention that something has gone awry. */
1921 if (!warned_about_fast_tracepoints
)
1923 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1924 warned_about_fast_tracepoints
= 1;
1931 /* Indicate that the minimum length is currently unknown since the IPA
1932 has not loaded yet. */
/* NOTE(review): extraction-garbled span (return type, braces, and the
   debug_threads guard line were dropped).  Tokens are byte-identical;
   only comments were added.  */
/* Append LEN raw instruction bytes from START at the current code
   emission point, then advance current_insn_ptr past them.  Helper
   used by the EMIT_ASM machinery below.  */
1938 add_insns (unsigned char *start
, int len
)
1940 CORE_ADDR buildaddr
= current_insn_ptr
;
1943 debug_printf ("Adding %d bytes of insn at %s\n",
1944 len
, paddress (buildaddr
));
/* Write the bytes into the inferior and bump the emission pointer.  */
1946 append_insns (&buildaddr
, len
, start
);
1947 current_insn_ptr
= buildaddr
;
1950 /* Our general strategy for emitting code is to avoid specifying raw
1951 bytes whenever possible, and instead copy a block of inline asm
1952 that is embedded in the function. This is a little messy, because
1953 we need to keep the compiler from discarding what looks like dead
1954 code, plus suppress various warnings. */
1956 #define EMIT_ASM(NAME, INSNS) \
1959 extern unsigned char start_ ## NAME, end_ ## NAME; \
1960 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1961 __asm__ ("jmp end_" #NAME "\n" \
1962 "\t" "start_" #NAME ":" \
1964 "\t" "end_" #NAME ":"); \
1969 #define EMIT_ASM32(NAME,INSNS) \
1972 extern unsigned char start_ ## NAME, end_ ## NAME; \
1973 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1974 __asm__ (".code32\n" \
1975 "\t" "jmp end_" #NAME "\n" \
1976 "\t" "start_" #NAME ":\n" \
1978 "\t" "end_" #NAME ":\n" \
1984 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1991 amd64_emit_prologue (void)
1993 EMIT_ASM (amd64_prologue
,
1995 "movq %rsp,%rbp\n\t"
1996 "sub $0x20,%rsp\n\t"
1997 "movq %rdi,-8(%rbp)\n\t"
1998 "movq %rsi,-16(%rbp)");
2003 amd64_emit_epilogue (void)
2005 EMIT_ASM (amd64_epilogue
,
2006 "movq -16(%rbp),%rdi\n\t"
2007 "movq %rax,(%rdi)\n\t"
2014 amd64_emit_add (void)
2016 EMIT_ASM (amd64_add
,
2017 "add (%rsp),%rax\n\t"
2018 "lea 0x8(%rsp),%rsp");
2022 amd64_emit_sub (void)
2024 EMIT_ASM (amd64_sub
,
2025 "sub %rax,(%rsp)\n\t"
2030 amd64_emit_mul (void)
2036 amd64_emit_lsh (void)
2042 amd64_emit_rsh_signed (void)
2048 amd64_emit_rsh_unsigned (void)
2054 amd64_emit_ext (int arg
)
2059 EMIT_ASM (amd64_ext_8
,
2065 EMIT_ASM (amd64_ext_16
,
2070 EMIT_ASM (amd64_ext_32
,
2079 amd64_emit_log_not (void)
2081 EMIT_ASM (amd64_log_not
,
2082 "test %rax,%rax\n\t"
2088 amd64_emit_bit_and (void)
2090 EMIT_ASM (amd64_and
,
2091 "and (%rsp),%rax\n\t"
2092 "lea 0x8(%rsp),%rsp");
2096 amd64_emit_bit_or (void)
2099 "or (%rsp),%rax\n\t"
2100 "lea 0x8(%rsp),%rsp");
2104 amd64_emit_bit_xor (void)
2106 EMIT_ASM (amd64_xor
,
2107 "xor (%rsp),%rax\n\t"
2108 "lea 0x8(%rsp),%rsp");
2112 amd64_emit_bit_not (void)
2114 EMIT_ASM (amd64_bit_not
,
2115 "xorq $0xffffffffffffffff,%rax");
2119 amd64_emit_equal (void)
2121 EMIT_ASM (amd64_equal
,
2122 "cmp %rax,(%rsp)\n\t"
2123 "je .Lamd64_equal_true\n\t"
2125 "jmp .Lamd64_equal_end\n\t"
2126 ".Lamd64_equal_true:\n\t"
2128 ".Lamd64_equal_end:\n\t"
2129 "lea 0x8(%rsp),%rsp");
2133 amd64_emit_less_signed (void)
2135 EMIT_ASM (amd64_less_signed
,
2136 "cmp %rax,(%rsp)\n\t"
2137 "jl .Lamd64_less_signed_true\n\t"
2139 "jmp .Lamd64_less_signed_end\n\t"
2140 ".Lamd64_less_signed_true:\n\t"
2142 ".Lamd64_less_signed_end:\n\t"
2143 "lea 0x8(%rsp),%rsp");
2147 amd64_emit_less_unsigned (void)
2149 EMIT_ASM (amd64_less_unsigned
,
2150 "cmp %rax,(%rsp)\n\t"
2151 "jb .Lamd64_less_unsigned_true\n\t"
2153 "jmp .Lamd64_less_unsigned_end\n\t"
2154 ".Lamd64_less_unsigned_true:\n\t"
2156 ".Lamd64_less_unsigned_end:\n\t"
2157 "lea 0x8(%rsp),%rsp");
2161 amd64_emit_ref (int size
)
2166 EMIT_ASM (amd64_ref1
,
2170 EMIT_ASM (amd64_ref2
,
2174 EMIT_ASM (amd64_ref4
,
2175 "movl (%rax),%eax");
2178 EMIT_ASM (amd64_ref8
,
2179 "movq (%rax),%rax");
2185 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2187 EMIT_ASM (amd64_if_goto
,
2191 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2199 amd64_emit_goto (int *offset_p
, int *size_p
)
2201 EMIT_ASM (amd64_goto
,
2202 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* NOTE(review): extraction-garbled span (return type, braces, and the
   size-validation lines between the declarations and the memcpy were
   dropped).  Tokens are byte-identical; only comments were added.  */
/* Patch a previously emitted jump at FROM (of instruction length SIZE)
   so that its 4-byte relative displacement targets TO.  */
2210 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
/* Relative displacement is computed from the end of the jump insn.  */
2212 int diff
= (to
- (from
+ size
));
2213 unsigned char buf
[sizeof (int)];
2221 memcpy (buf
, &diff
, sizeof (int));
2222 write_inferior_memory (from
, buf
, sizeof (int));
2226 amd64_emit_const (LONGEST num
)
2228 unsigned char buf
[16];
2230 CORE_ADDR buildaddr
= current_insn_ptr
;
2233 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2234 memcpy (&buf
[i
], &num
, sizeof (num
));
2236 append_insns (&buildaddr
, i
, buf
);
2237 current_insn_ptr
= buildaddr
;
/* NOTE(review): extraction-garbled span -- the return type, braces,
   the declarations of i/offset64, the rel32 call-opcode emission
   lines, and several buf[i++] bytes of the movabs/callq encodings
   were dropped.  Tokens are byte-identical; only comments added.  */
/* Emit a call to FN at the current emission point, choosing between a
   5-byte rel32 call and a movabs+indirect call through %r10 when FN
   is out of rel32 range.  */
2241 amd64_emit_call (CORE_ADDR fn
)
2243 unsigned char buf
[16];
2245 CORE_ADDR buildaddr
;
2248 /* The destination function being in the shared library, may be
2249 >31-bits away off the compiled code pad. */
2251 buildaddr
= current_insn_ptr
;
/* Displacement measured from the end of a would-be rel32 call.  */
2253 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2257 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2259 /* Offset is too large for a call. Use callq, but that requires
2260 a register, so avoid it if possible. Use r10, since it is
2261 call-clobbered, we don't have to push/pop it. */
2262 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2264 memcpy (buf
+ i
, &fn
, 8);
2266 buf
[i
++] = 0xff; /* callq *%r10 */
/* In-range case: emit the rel32 call displacement.  */
2271 int offset32
= offset64
; /* we know we can't overflow here. */
2272 memcpy (buf
+ i
, &offset32
, 4);
2276 append_insns (&buildaddr
, i
, buf
);
2277 current_insn_ptr
= buildaddr
;
2281 amd64_emit_reg (int reg
)
2283 unsigned char buf
[16];
2285 CORE_ADDR buildaddr
;
2287 /* Assume raw_regs is still in %rdi. */
2288 buildaddr
= current_insn_ptr
;
2290 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2291 memcpy (&buf
[i
], ®
, sizeof (reg
));
2293 append_insns (&buildaddr
, i
, buf
);
2294 current_insn_ptr
= buildaddr
;
2295 amd64_emit_call (get_raw_reg_func_addr ());
2299 amd64_emit_pop (void)
2301 EMIT_ASM (amd64_pop
,
2306 amd64_emit_stack_flush (void)
2308 EMIT_ASM (amd64_stack_flush
,
2313 amd64_emit_zero_ext (int arg
)
2318 EMIT_ASM (amd64_zero_ext_8
,
2322 EMIT_ASM (amd64_zero_ext_16
,
2323 "and $0xffff,%rax");
2326 EMIT_ASM (amd64_zero_ext_32
,
2327 "mov $0xffffffff,%rcx\n\t"
2336 amd64_emit_swap (void)
2338 EMIT_ASM (amd64_swap
,
2345 amd64_emit_stack_adjust (int n
)
2347 unsigned char buf
[16];
2349 CORE_ADDR buildaddr
= current_insn_ptr
;
2352 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2356 /* This only handles adjustments up to 16, but we don't expect any more. */
2358 append_insns (&buildaddr
, i
, buf
);
2359 current_insn_ptr
= buildaddr
;
2362 /* FN's prototype is `LONGEST(*fn)(int)'. */
2365 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2367 unsigned char buf
[16];
2369 CORE_ADDR buildaddr
;
2371 buildaddr
= current_insn_ptr
;
2373 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2374 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2376 append_insns (&buildaddr
, i
, buf
);
2377 current_insn_ptr
= buildaddr
;
2378 amd64_emit_call (fn
);
2381 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2384 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2386 unsigned char buf
[16];
2388 CORE_ADDR buildaddr
;
2390 buildaddr
= current_insn_ptr
;
2392 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2393 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2395 append_insns (&buildaddr
, i
, buf
);
2396 current_insn_ptr
= buildaddr
;
2397 EMIT_ASM (amd64_void_call_2_a
,
2398 /* Save away a copy of the stack top. */
2400 /* Also pass top as the second argument. */
2402 amd64_emit_call (fn
);
2403 EMIT_ASM (amd64_void_call_2_b
,
2404 /* Restore the stack top, %rax may have been trashed. */
2409 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2412 "cmp %rax,(%rsp)\n\t"
2413 "jne .Lamd64_eq_fallthru\n\t"
2414 "lea 0x8(%rsp),%rsp\n\t"
2416 /* jmp, but don't trust the assembler to choose the right jump */
2417 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2418 ".Lamd64_eq_fallthru:\n\t"
2419 "lea 0x8(%rsp),%rsp\n\t"
2429 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2432 "cmp %rax,(%rsp)\n\t"
2433 "je .Lamd64_ne_fallthru\n\t"
2434 "lea 0x8(%rsp),%rsp\n\t"
2436 /* jmp, but don't trust the assembler to choose the right jump */
2437 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2438 ".Lamd64_ne_fallthru:\n\t"
2439 "lea 0x8(%rsp),%rsp\n\t"
2449 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2452 "cmp %rax,(%rsp)\n\t"
2453 "jnl .Lamd64_lt_fallthru\n\t"
2454 "lea 0x8(%rsp),%rsp\n\t"
2456 /* jmp, but don't trust the assembler to choose the right jump */
2457 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2458 ".Lamd64_lt_fallthru:\n\t"
2459 "lea 0x8(%rsp),%rsp\n\t"
2469 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2472 "cmp %rax,(%rsp)\n\t"
2473 "jnle .Lamd64_le_fallthru\n\t"
2474 "lea 0x8(%rsp),%rsp\n\t"
2476 /* jmp, but don't trust the assembler to choose the right jump */
2477 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2478 ".Lamd64_le_fallthru:\n\t"
2479 "lea 0x8(%rsp),%rsp\n\t"
2489 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2492 "cmp %rax,(%rsp)\n\t"
2493 "jng .Lamd64_gt_fallthru\n\t"
2494 "lea 0x8(%rsp),%rsp\n\t"
2496 /* jmp, but don't trust the assembler to choose the right jump */
2497 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2498 ".Lamd64_gt_fallthru:\n\t"
2499 "lea 0x8(%rsp),%rsp\n\t"
2509 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2512 "cmp %rax,(%rsp)\n\t"
2513 "jnge .Lamd64_ge_fallthru\n\t"
2514 ".Lamd64_ge_jump:\n\t"
2515 "lea 0x8(%rsp),%rsp\n\t"
2517 /* jmp, but don't trust the assembler to choose the right jump */
2518 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2519 ".Lamd64_ge_fallthru:\n\t"
2520 "lea 0x8(%rsp),%rsp\n\t"
2529 struct emit_ops amd64_emit_ops
=
2531 amd64_emit_prologue
,
2532 amd64_emit_epilogue
,
2537 amd64_emit_rsh_signed
,
2538 amd64_emit_rsh_unsigned
,
2546 amd64_emit_less_signed
,
2547 amd64_emit_less_unsigned
,
2551 amd64_write_goto_address
,
2556 amd64_emit_stack_flush
,
2557 amd64_emit_zero_ext
,
2559 amd64_emit_stack_adjust
,
2560 amd64_emit_int_call_1
,
2561 amd64_emit_void_call_2
,
2570 #endif /* __x86_64__ */
2573 i386_emit_prologue (void)
2575 EMIT_ASM32 (i386_prologue
,
2579 /* At this point, the raw regs base address is at 8(%ebp), and the
2580 value pointer is at 12(%ebp). */
2584 i386_emit_epilogue (void)
2586 EMIT_ASM32 (i386_epilogue
,
2587 "mov 12(%ebp),%ecx\n\t"
2588 "mov %eax,(%ecx)\n\t"
2589 "mov %ebx,0x4(%ecx)\n\t"
2597 i386_emit_add (void)
2599 EMIT_ASM32 (i386_add
,
2600 "add (%esp),%eax\n\t"
2601 "adc 0x4(%esp),%ebx\n\t"
2602 "lea 0x8(%esp),%esp");
2606 i386_emit_sub (void)
2608 EMIT_ASM32 (i386_sub
,
2609 "subl %eax,(%esp)\n\t"
2610 "sbbl %ebx,4(%esp)\n\t"
2616 i386_emit_mul (void)
2622 i386_emit_lsh (void)
2628 i386_emit_rsh_signed (void)
2634 i386_emit_rsh_unsigned (void)
2640 i386_emit_ext (int arg
)
2645 EMIT_ASM32 (i386_ext_8
,
2648 "movl %eax,%ebx\n\t"
2652 EMIT_ASM32 (i386_ext_16
,
2654 "movl %eax,%ebx\n\t"
2658 EMIT_ASM32 (i386_ext_32
,
2659 "movl %eax,%ebx\n\t"
2668 i386_emit_log_not (void)
2670 EMIT_ASM32 (i386_log_not
,
2672 "test %eax,%eax\n\t"
2679 i386_emit_bit_and (void)
2681 EMIT_ASM32 (i386_and
,
2682 "and (%esp),%eax\n\t"
2683 "and 0x4(%esp),%ebx\n\t"
2684 "lea 0x8(%esp),%esp");
2688 i386_emit_bit_or (void)
2690 EMIT_ASM32 (i386_or
,
2691 "or (%esp),%eax\n\t"
2692 "or 0x4(%esp),%ebx\n\t"
2693 "lea 0x8(%esp),%esp");
2697 i386_emit_bit_xor (void)
2699 EMIT_ASM32 (i386_xor
,
2700 "xor (%esp),%eax\n\t"
2701 "xor 0x4(%esp),%ebx\n\t"
2702 "lea 0x8(%esp),%esp");
2706 i386_emit_bit_not (void)
2708 EMIT_ASM32 (i386_bit_not
,
2709 "xor $0xffffffff,%eax\n\t"
2710 "xor $0xffffffff,%ebx\n\t");
2714 i386_emit_equal (void)
2716 EMIT_ASM32 (i386_equal
,
2717 "cmpl %ebx,4(%esp)\n\t"
2718 "jne .Li386_equal_false\n\t"
2719 "cmpl %eax,(%esp)\n\t"
2720 "je .Li386_equal_true\n\t"
2721 ".Li386_equal_false:\n\t"
2723 "jmp .Li386_equal_end\n\t"
2724 ".Li386_equal_true:\n\t"
2726 ".Li386_equal_end:\n\t"
2728 "lea 0x8(%esp),%esp");
2732 i386_emit_less_signed (void)
2734 EMIT_ASM32 (i386_less_signed
,
2735 "cmpl %ebx,4(%esp)\n\t"
2736 "jl .Li386_less_signed_true\n\t"
2737 "jne .Li386_less_signed_false\n\t"
2738 "cmpl %eax,(%esp)\n\t"
2739 "jl .Li386_less_signed_true\n\t"
2740 ".Li386_less_signed_false:\n\t"
2742 "jmp .Li386_less_signed_end\n\t"
2743 ".Li386_less_signed_true:\n\t"
2745 ".Li386_less_signed_end:\n\t"
2747 "lea 0x8(%esp),%esp");
2751 i386_emit_less_unsigned (void)
2753 EMIT_ASM32 (i386_less_unsigned
,
2754 "cmpl %ebx,4(%esp)\n\t"
2755 "jb .Li386_less_unsigned_true\n\t"
2756 "jne .Li386_less_unsigned_false\n\t"
2757 "cmpl %eax,(%esp)\n\t"
2758 "jb .Li386_less_unsigned_true\n\t"
2759 ".Li386_less_unsigned_false:\n\t"
2761 "jmp .Li386_less_unsigned_end\n\t"
2762 ".Li386_less_unsigned_true:\n\t"
2764 ".Li386_less_unsigned_end:\n\t"
2766 "lea 0x8(%esp),%esp");
2770 i386_emit_ref (int size
)
2775 EMIT_ASM32 (i386_ref1
,
2779 EMIT_ASM32 (i386_ref2
,
2783 EMIT_ASM32 (i386_ref4
,
2784 "movl (%eax),%eax");
2787 EMIT_ASM32 (i386_ref8
,
2788 "movl 4(%eax),%ebx\n\t"
2789 "movl (%eax),%eax");
2795 i386_emit_if_goto (int *offset_p
, int *size_p
)
2797 EMIT_ASM32 (i386_if_goto
,
2803 /* Don't trust the assembler to choose the right jump */
2804 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2807 *offset_p
= 11; /* be sure that this matches the sequence above */
2813 i386_emit_goto (int *offset_p
, int *size_p
)
2815 EMIT_ASM32 (i386_goto
,
2816 /* Don't trust the assembler to choose the right jump */
2817 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* NOTE(review): extraction-garbled span (return type, braces, and the
   size-check lines after the "4-byte sizes" comment were dropped).
   Tokens are byte-identical; only comments were added.  */
/* 32-bit counterpart of amd64_write_goto_address: patch the 4-byte
   relative displacement of the jump at FROM to target TO.  */
2825 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
/* Displacement is relative to the end of the jump instruction.  */
2827 int diff
= (to
- (from
+ size
));
2828 unsigned char buf
[sizeof (int)];
2830 /* We're only doing 4-byte sizes at the moment. */
2837 memcpy (buf
, &diff
, sizeof (int));
2838 write_inferior_memory (from
, buf
, sizeof (int));
2842 i386_emit_const (LONGEST num
)
2844 unsigned char buf
[16];
2846 CORE_ADDR buildaddr
= current_insn_ptr
;
2849 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2850 lo
= num
& 0xffffffff;
2851 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2853 hi
= ((num
>> 32) & 0xffffffff);
2856 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2857 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2862 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2864 append_insns (&buildaddr
, i
, buf
);
2865 current_insn_ptr
= buildaddr
;
/* NOTE(review): extraction-garbled span (return type, braces, and the
   declarations of i/offset were dropped).  Tokens are byte-identical;
   only comments were added.  */
/* Emit a 5-byte rel32 call to FN at the current emission point; on
   i386 every address is within rel32 range so no far-call fallback is
   needed.  */
2869 i386_emit_call (CORE_ADDR fn
)
2871 unsigned char buf
[16];
2873 CORE_ADDR buildaddr
;
2875 buildaddr
= current_insn_ptr
;
2877 buf
[i
++] = 0xe8; /* call <reladdr> */
/* rel32 displacement from the end of the 5-byte call insn.  */
2878 offset
= ((int) fn
) - (buildaddr
+ 5);
2879 memcpy (buf
+ 1, &offset
, 4);
2880 append_insns (&buildaddr
, 5, buf
);
2881 current_insn_ptr
= buildaddr
;
2885 i386_emit_reg (int reg
)
2887 unsigned char buf
[16];
2889 CORE_ADDR buildaddr
;
2891 EMIT_ASM32 (i386_reg_a
,
2893 buildaddr
= current_insn_ptr
;
2895 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2896 memcpy (&buf
[i
], ®
, sizeof (reg
));
2898 append_insns (&buildaddr
, i
, buf
);
2899 current_insn_ptr
= buildaddr
;
2900 EMIT_ASM32 (i386_reg_b
,
2901 "mov %eax,4(%esp)\n\t"
2902 "mov 8(%ebp),%eax\n\t"
2904 i386_emit_call (get_raw_reg_func_addr ());
2905 EMIT_ASM32 (i386_reg_c
,
2907 "lea 0x8(%esp),%esp");
2911 i386_emit_pop (void)
2913 EMIT_ASM32 (i386_pop
,
2919 i386_emit_stack_flush (void)
2921 EMIT_ASM32 (i386_stack_flush
,
2927 i386_emit_zero_ext (int arg
)
2932 EMIT_ASM32 (i386_zero_ext_8
,
2933 "and $0xff,%eax\n\t"
2937 EMIT_ASM32 (i386_zero_ext_16
,
2938 "and $0xffff,%eax\n\t"
2942 EMIT_ASM32 (i386_zero_ext_32
,
2951 i386_emit_swap (void)
2953 EMIT_ASM32 (i386_swap
,
2963 i386_emit_stack_adjust (int n
)
2965 unsigned char buf
[16];
2967 CORE_ADDR buildaddr
= current_insn_ptr
;
2970 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2974 append_insns (&buildaddr
, i
, buf
);
2975 current_insn_ptr
= buildaddr
;
2978 /* FN's prototype is `LONGEST(*fn)(int)'. */
2981 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2983 unsigned char buf
[16];
2985 CORE_ADDR buildaddr
;
2987 EMIT_ASM32 (i386_int_call_1_a
,
2988 /* Reserve a bit of stack space. */
2990 /* Put the one argument on the stack. */
2991 buildaddr
= current_insn_ptr
;
2993 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2996 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2998 append_insns (&buildaddr
, i
, buf
);
2999 current_insn_ptr
= buildaddr
;
3000 i386_emit_call (fn
);
3001 EMIT_ASM32 (i386_int_call_1_c
,
3003 "lea 0x8(%esp),%esp");
3006 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3009 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3011 unsigned char buf
[16];
3013 CORE_ADDR buildaddr
;
3015 EMIT_ASM32 (i386_void_call_2_a
,
3016 /* Preserve %eax only; we don't have to worry about %ebx. */
3018 /* Reserve a bit of stack space for arguments. */
3019 "sub $0x10,%esp\n\t"
3020 /* Copy "top" to the second argument position. (Note that
3021 we can't assume function won't scribble on its
3022 arguments, so don't try to restore from this.) */
3023 "mov %eax,4(%esp)\n\t"
3024 "mov %ebx,8(%esp)");
3025 /* Put the first argument on the stack. */
3026 buildaddr
= current_insn_ptr
;
3028 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3031 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3033 append_insns (&buildaddr
, i
, buf
);
3034 current_insn_ptr
= buildaddr
;
3035 i386_emit_call (fn
);
3036 EMIT_ASM32 (i386_void_call_2_b
,
3037 "lea 0x10(%esp),%esp\n\t"
3038 /* Restore original stack top. */
3044 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3047 /* Check low half first, more likely to be decider */
3048 "cmpl %eax,(%esp)\n\t"
3049 "jne .Leq_fallthru\n\t"
3050 "cmpl %ebx,4(%esp)\n\t"
3051 "jne .Leq_fallthru\n\t"
3052 "lea 0x8(%esp),%esp\n\t"
3055 /* jmp, but don't trust the assembler to choose the right jump */
3056 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3057 ".Leq_fallthru:\n\t"
3058 "lea 0x8(%esp),%esp\n\t"
3069 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3072 /* Check low half first, more likely to be decider */
3073 "cmpl %eax,(%esp)\n\t"
3075 "cmpl %ebx,4(%esp)\n\t"
3076 "je .Lne_fallthru\n\t"
3078 "lea 0x8(%esp),%esp\n\t"
3081 /* jmp, but don't trust the assembler to choose the right jump */
3082 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3083 ".Lne_fallthru:\n\t"
3084 "lea 0x8(%esp),%esp\n\t"
3095 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3098 "cmpl %ebx,4(%esp)\n\t"
3100 "jne .Llt_fallthru\n\t"
3101 "cmpl %eax,(%esp)\n\t"
3102 "jnl .Llt_fallthru\n\t"
3104 "lea 0x8(%esp),%esp\n\t"
3107 /* jmp, but don't trust the assembler to choose the right jump */
3108 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3109 ".Llt_fallthru:\n\t"
3110 "lea 0x8(%esp),%esp\n\t"
3121 i386_emit_le_goto (int *offset_p
, int *size_p
)
3124 "cmpl %ebx,4(%esp)\n\t"
3126 "jne .Lle_fallthru\n\t"
3127 "cmpl %eax,(%esp)\n\t"
3128 "jnle .Lle_fallthru\n\t"
3130 "lea 0x8(%esp),%esp\n\t"
3133 /* jmp, but don't trust the assembler to choose the right jump */
3134 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3135 ".Lle_fallthru:\n\t"
3136 "lea 0x8(%esp),%esp\n\t"
3147 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3150 "cmpl %ebx,4(%esp)\n\t"
3152 "jne .Lgt_fallthru\n\t"
3153 "cmpl %eax,(%esp)\n\t"
3154 "jng .Lgt_fallthru\n\t"
3156 "lea 0x8(%esp),%esp\n\t"
3159 /* jmp, but don't trust the assembler to choose the right jump */
3160 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3161 ".Lgt_fallthru:\n\t"
3162 "lea 0x8(%esp),%esp\n\t"
3173 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3176 "cmpl %ebx,4(%esp)\n\t"
3178 "jne .Lge_fallthru\n\t"
3179 "cmpl %eax,(%esp)\n\t"
3180 "jnge .Lge_fallthru\n\t"
3182 "lea 0x8(%esp),%esp\n\t"
3185 /* jmp, but don't trust the assembler to choose the right jump */
3186 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3187 ".Lge_fallthru:\n\t"
3188 "lea 0x8(%esp),%esp\n\t"
3198 struct emit_ops i386_emit_ops
=
3206 i386_emit_rsh_signed
,
3207 i386_emit_rsh_unsigned
,
3215 i386_emit_less_signed
,
3216 i386_emit_less_unsigned
,
3220 i386_write_goto_address
,
3225 i386_emit_stack_flush
,
3228 i386_emit_stack_adjust
,
3229 i386_emit_int_call_1
,
3230 i386_emit_void_call_2
,
/* NOTE(review): extraction-garbled span -- the function-name line was
   dropped (presumably "x86_emit_ops (void)"; confirm against the
   upstream file), as were the braces.  Tokens are byte-identical;
   only comments were added.  */
/* Return the emit_ops vtable matching the current target description:
   amd64_emit_ops for a 64-bit inferior, i386_emit_ops otherwise.  */
3240 static struct emit_ops
*
3244 if (is_64bit_tdesc ())
3245 return &amd64_emit_ops
;
3248 return &i386_emit_ops
;
3252 x86_supports_range_stepping (void)
3257 /* This is initialized assuming an amd64 target.
3258 x86_arch_setup will correct it for i386 or amd64 targets. */
3260 struct linux_target_ops the_low_target
=
3263 x86_linux_regs_info
,
3264 x86_cannot_fetch_register
,
3265 x86_cannot_store_register
,
3266 NULL
, /* fetch_register */
3274 x86_supports_z_point_type
,
3277 x86_stopped_by_watchpoint
,
3278 x86_stopped_data_address
,
3279 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3280 native i386 case (no registers smaller than an xfer unit), and are not
3281 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3284 /* need to fix up i386 siginfo if host is amd64 */
3286 x86_linux_new_process
,
3287 x86_linux_new_thread
,
3288 x86_linux_prepare_to_resume
,
3289 x86_linux_process_qsupported
,
3290 x86_supports_tracepoints
,
3291 x86_get_thread_area
,
3292 x86_install_fast_tracepoint_jump_pad
,
3294 x86_get_min_fast_tracepoint_insn_len
,
3295 x86_supports_range_stepping
,
3299 initialize_low_arch (void)
3301 /* Initialize the Linux target descriptions. */
3303 init_registers_amd64_linux ();
3304 init_registers_amd64_avx_linux ();
3305 init_registers_amd64_avx512_linux ();
3306 init_registers_amd64_mpx_linux ();
3308 init_registers_x32_linux ();
3309 init_registers_x32_avx_linux ();
3310 init_registers_x32_avx512_linux ();
3312 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3313 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3314 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3316 init_registers_i386_linux ();
3317 init_registers_i386_mmx_linux ();
3318 init_registers_i386_avx_linux ();
3319 init_registers_i386_avx512_linux ();
3320 init_registers_i386_mpx_linux ();
3322 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3323 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3324 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3326 initialize_regsets_info (&x86_regsets_info
);