1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
42 #include "nat/x86-linux-dregs.h"
45 /* Defined in auto-generated file amd64-linux.c. */
46 void init_registers_amd64_linux (void);
47 extern const struct target_desc
*tdesc_amd64_linux
;
49 /* Defined in auto-generated file amd64-avx-linux.c. */
50 void init_registers_amd64_avx_linux (void);
51 extern const struct target_desc
*tdesc_amd64_avx_linux
;
53 /* Defined in auto-generated file amd64-avx512-linux.c. */
54 void init_registers_amd64_avx512_linux (void);
55 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
57 /* Defined in auto-generated file amd64-mpx-linux.c. */
58 void init_registers_amd64_mpx_linux (void);
59 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
61 /* Defined in auto-generated file x32-linux.c. */
62 void init_registers_x32_linux (void);
63 extern const struct target_desc
*tdesc_x32_linux
;
65 /* Defined in auto-generated file x32-avx-linux.c. */
66 void init_registers_x32_avx_linux (void);
67 extern const struct target_desc
*tdesc_x32_avx_linux
;
69 /* Defined in auto-generated file x32-avx512-linux.c. */
70 void init_registers_x32_avx512_linux (void);
71 extern const struct target_desc
*tdesc_x32_avx512_linux
;
75 /* Defined in auto-generated file i386-linux.c. */
76 void init_registers_i386_linux (void);
77 extern const struct target_desc
*tdesc_i386_linux
;
79 /* Defined in auto-generated file i386-mmx-linux.c. */
80 void init_registers_i386_mmx_linux (void);
81 extern const struct target_desc
*tdesc_i386_mmx_linux
;
83 /* Defined in auto-generated file i386-avx-linux.c. */
84 void init_registers_i386_avx_linux (void);
85 extern const struct target_desc
*tdesc_i386_avx_linux
;
87 /* Defined in auto-generated file i386-avx512-linux.c. */
88 void init_registers_i386_avx512_linux (void);
89 extern const struct target_desc
*tdesc_i386_avx512_linux
;
91 /* Defined in auto-generated file i386-mpx-linux.c. */
92 void init_registers_i386_mpx_linux (void);
93 extern const struct target_desc
*tdesc_i386_mpx_linux
;
96 static struct target_desc
*tdesc_amd64_linux_no_xml
;
98 static struct target_desc
*tdesc_i386_linux_no_xml
;
101 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
102 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
104 /* Backward compatibility for gdb without XML support. */
106 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
107 <architecture>i386</architecture>\
108 <osabi>GNU/Linux</osabi>\
112 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
113 <architecture>i386:x86-64</architecture>\
114 <osabi>GNU/Linux</osabi>\
119 #include <sys/procfs.h>
120 #include <sys/ptrace.h>
123 #ifndef PTRACE_GETREGSET
124 #define PTRACE_GETREGSET 0x4204
127 #ifndef PTRACE_SETREGSET
128 #define PTRACE_SETREGSET 0x4205
132 #ifndef PTRACE_GET_THREAD_AREA
133 #define PTRACE_GET_THREAD_AREA 25
136 /* This definition comes from prctl.h, but some kernels may not have it. */
137 #ifndef PTRACE_ARCH_PRCTL
138 #define PTRACE_ARCH_PRCTL 30
141 /* The following definitions come from prctl.h, but may be absent
142 for certain configurations. */
144 #define ARCH_SET_GS 0x1001
145 #define ARCH_SET_FS 0x1002
146 #define ARCH_GET_FS 0x1003
147 #define ARCH_GET_GS 0x1004
150 /* Per-process arch-specific data we want to keep. */
152 struct arch_process_info
154 struct x86_debug_reg_state debug_reg_state
;
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout.
161 Note that the transfer layout uses 64-bit regs. */
162 static /*const*/ int i386_regmap
[] =
164 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
165 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
166 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
167 DS
* 8, ES
* 8, FS
* 8, GS
* 8
170 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
172 /* So code below doesn't have to care, i386 or amd64. */
173 #define ORIG_EAX ORIG_RAX
176 static const int x86_64_regmap
[] =
178 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
179 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
180 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
181 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
182 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
183 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1, -1, -1, -1, -1, -1, -1, -1,
188 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
191 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
192 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
193 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
195 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1
203 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
204 #define X86_64_USER_REGS (GS + 1)
206 #else /* ! __x86_64__ */
208 /* Mapping between the general-purpose registers in `struct user'
209 format and GDB's register array layout. */
210 static /*const*/ int i386_regmap
[] =
212 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
213 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
214 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
215 DS
* 4, ES
* 4, FS
* 4, GS
* 4
218 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
226 /* Returns true if the current inferior belongs to a x86-64 process,
230 is_64bit_tdesc (void)
232 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
234 return register_size (regcache
->tdesc
, 0) == 8;
240 /* Called by libthread_db. */
243 ps_get_thread_area (const struct ps_prochandle
*ph
,
244 lwpid_t lwpid
, int idx
, void **base
)
247 int use_64bit
= is_64bit_tdesc ();
254 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
258 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
269 unsigned int desc
[4];
271 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
272 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
275 /* Ensure we properly extend the value to 64-bits for x86_64. */
276 *base
= (void *) (uintptr_t) desc
[1];
281 /* Get the thread area address. This is used to recognize which
282 thread is which when tracing with the in-process agent library. We
283 don't read anything from the address, and treat it as opaque; it's
284 the address itself that we assume is unique per-thread. */
287 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
290 int use_64bit
= is_64bit_tdesc ();
295 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
297 *addr
= (CORE_ADDR
) (uintptr_t) base
;
306 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
307 struct thread_info
*thr
= get_lwp_thread (lwp
);
308 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
309 unsigned int desc
[4];
311 const int reg_thread_area
= 3; /* bits to scale down register value. */
314 collect_register_by_name (regcache
, "gs", &gs
);
316 idx
= gs
>> reg_thread_area
;
318 if (ptrace (PTRACE_GET_THREAD_AREA
,
320 (void *) (long) idx
, (unsigned long) &desc
) < 0)
331 x86_cannot_store_register (int regno
)
334 if (is_64bit_tdesc ())
338 return regno
>= I386_NUM_REGS
;
342 x86_cannot_fetch_register (int regno
)
345 if (is_64bit_tdesc ())
349 return regno
>= I386_NUM_REGS
;
353 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
358 if (register_size (regcache
->tdesc
, 0) == 8)
360 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
361 if (x86_64_regmap
[i
] != -1)
362 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
366 /* 32-bit inferior registers need to be zero-extended.
367 Callers would read uninitialized memory otherwise. */
368 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
371 for (i
= 0; i
< I386_NUM_REGS
; i
++)
372 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
374 collect_register_by_name (regcache
, "orig_eax",
375 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
379 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
384 if (register_size (regcache
->tdesc
, 0) == 8)
386 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
387 if (x86_64_regmap
[i
] != -1)
388 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
393 for (i
= 0; i
< I386_NUM_REGS
; i
++)
394 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
396 supply_register_by_name (regcache
, "orig_eax",
397 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Write the i387 floating-point registers from REGCACHE into BUF, in
   fxsave layout on 64-bit hosts and fsave layout on 32-bit ones.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Read the i387 floating-point registers from BUF into REGCACHE.
   Inverse of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Write the extended (fxsave) FP registers from REGCACHE into BUF.
   Only used on 32-bit hosts (PTRACE_SETFPXREGS).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Read the extended (fxsave) FP registers from BUF into REGCACHE.
   Only used on 32-bit hosts (PTRACE_GETFPXREGS).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Write the XSAVE extended state from REGCACHE into BUF, for the
   PTRACE_SETREGSET/NT_X86_XSTATE regset.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Read the XSAVE extended state from BUF into REGCACHE, for the
   PTRACE_GETREGSET/NT_X86_XSTATE regset.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
448 /* ??? The non-biarch i386 case stores all the i387 regs twice.
449 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
450 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
451 doesn't work. IWBN to avoid the duplication in the case where it
452 does work. Maybe the arch_setup routine could check whether it works
453 and update the supported regsets accordingly. */
455 static struct regset_info x86_regsets
[] =
457 #ifdef HAVE_PTRACE_GETREGS
458 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
460 x86_fill_gregset
, x86_store_gregset
},
461 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
462 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
464 # ifdef HAVE_PTRACE_GETFPXREGS
465 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
467 x86_fill_fpxregset
, x86_store_fpxregset
},
470 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
472 x86_fill_fpregset
, x86_store_fpregset
},
473 #endif /* HAVE_PTRACE_GETREGS */
474 { 0, 0, 0, -1, -1, NULL
, NULL
}
478 x86_get_pc (struct regcache
*regcache
)
480 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
485 collect_register_by_name (regcache
, "rip", &pc
);
486 return (CORE_ADDR
) pc
;
491 collect_register_by_name (regcache
, "eip", &pc
);
492 return (CORE_ADDR
) pc
;
497 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
499 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
503 unsigned long newpc
= pc
;
504 supply_register_by_name (regcache
, "rip", &newpc
);
508 unsigned int newpc
= pc
;
509 supply_register_by_name (regcache
, "eip", &newpc
);
513 static const unsigned char x86_breakpoint
[] = { 0xCC };
514 #define x86_breakpoint_len 1
517 x86_breakpoint_at (CORE_ADDR pc
)
521 (*the_target
->read_memory
) (pc
, &c
, 1);
528 /* Low-level function vector.  */
/* Accessors handed to the shared x86 debug-register code; these are
   the x86_linux_dr_* routines from nat/x86-linux-dregs.h (included
   above).  Order follows struct x86_dr_low_type: set DR7, set a DR
   address, get a DR address, get DR6 status, get DR7.  */
529 struct x86_dr_low_type x86_dr_low
=
531 x86_linux_dr_set_control
,
532 x86_linux_dr_set_addr
,
533 x86_linux_dr_get_addr
,
534 x86_linux_dr_get_status
,
535 x86_linux_dr_get_control
,
539 /* Breakpoint/Watchpoint support. */
542 x86_supports_z_point_type (char z_type
)
548 case Z_PACKET_WRITE_WP
:
549 case Z_PACKET_ACCESS_WP
:
557 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
558 int size
, struct raw_breakpoint
*bp
)
560 struct process_info
*proc
= current_process ();
564 case raw_bkpt_type_hw
:
565 case raw_bkpt_type_write_wp
:
566 case raw_bkpt_type_access_wp
:
568 enum target_hw_bp_type hw_type
569 = raw_bkpt_type_to_target_hw_bp_type (type
);
570 struct x86_debug_reg_state
*state
571 = &proc
->priv
->arch_private
->debug_reg_state
;
573 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
583 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
584 int size
, struct raw_breakpoint
*bp
)
586 struct process_info
*proc
= current_process ();
590 case raw_bkpt_type_hw
:
591 case raw_bkpt_type_write_wp
:
592 case raw_bkpt_type_access_wp
:
594 enum target_hw_bp_type hw_type
595 = raw_bkpt_type_to_target_hw_bp_type (type
);
596 struct x86_debug_reg_state
*state
597 = &proc
->priv
->arch_private
->debug_reg_state
;
599 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
608 x86_stopped_by_watchpoint (void)
610 struct process_info
*proc
= current_process ();
611 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
615 x86_stopped_data_address (void)
617 struct process_info
*proc
= current_process ();
619 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
625 /* Called when a new process is created. */
627 static struct arch_process_info
*
628 x86_linux_new_process (void)
630 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
632 x86_low_init_dregs (&info
->debug_reg_state
);
637 /* See nat/x86-dregs.h. */
639 struct x86_debug_reg_state
*
640 x86_debug_reg_state (pid_t pid
)
642 struct process_info
*proc
= find_process_pid (pid
);
644 return &proc
->priv
->arch_private
->debug_reg_state
;
647 /* When GDBSERVER is built as a 64-bit application on linux, the
648 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
649 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
650 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
651 conversion in-place ourselves. */
653 /* These types below (compat_*) define a siginfo type that is layout
654 compatible with the siginfo type exported by the 32-bit userspace
659 typedef int compat_int_t
;
660 typedef unsigned int compat_uptr_t
;
662 typedef int compat_time_t
;
663 typedef int compat_timer_t
;
664 typedef int compat_clock_t
;
666 struct compat_timeval
668 compat_time_t tv_sec
;
672 typedef union compat_sigval
674 compat_int_t sival_int
;
675 compat_uptr_t sival_ptr
;
678 typedef struct compat_siginfo
686 int _pad
[((128 / sizeof (int)) - 3)];
695 /* POSIX.1b timers */
700 compat_sigval_t _sigval
;
703 /* POSIX.1b signals */
708 compat_sigval_t _sigval
;
717 compat_clock_t _utime
;
718 compat_clock_t _stime
;
721 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
736 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
737 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
739 typedef struct compat_x32_siginfo
747 int _pad
[((128 / sizeof (int)) - 3)];
756 /* POSIX.1b timers */
761 compat_sigval_t _sigval
;
764 /* POSIX.1b signals */
769 compat_sigval_t _sigval
;
778 compat_x32_clock_t _utime
;
779 compat_x32_clock_t _stime
;
782 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
795 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
797 #define cpt_si_pid _sifields._kill._pid
798 #define cpt_si_uid _sifields._kill._uid
799 #define cpt_si_timerid _sifields._timer._tid
800 #define cpt_si_overrun _sifields._timer._overrun
801 #define cpt_si_status _sifields._sigchld._status
802 #define cpt_si_utime _sifields._sigchld._utime
803 #define cpt_si_stime _sifields._sigchld._stime
804 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
805 #define cpt_si_addr _sifields._sigfault._addr
806 #define cpt_si_band _sifields._sigpoll._band
807 #define cpt_si_fd _sifields._sigpoll._fd
809 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
810 In their place is si_timer1,si_timer2. */
812 #define si_timerid si_timer1
815 #define si_overrun si_timer2
819 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
821 memset (to
, 0, sizeof (*to
));
823 to
->si_signo
= from
->si_signo
;
824 to
->si_errno
= from
->si_errno
;
825 to
->si_code
= from
->si_code
;
827 if (to
->si_code
== SI_TIMER
)
829 to
->cpt_si_timerid
= from
->si_timerid
;
830 to
->cpt_si_overrun
= from
->si_overrun
;
831 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
833 else if (to
->si_code
== SI_USER
)
835 to
->cpt_si_pid
= from
->si_pid
;
836 to
->cpt_si_uid
= from
->si_uid
;
838 else if (to
->si_code
< 0)
840 to
->cpt_si_pid
= from
->si_pid
;
841 to
->cpt_si_uid
= from
->si_uid
;
842 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
846 switch (to
->si_signo
)
849 to
->cpt_si_pid
= from
->si_pid
;
850 to
->cpt_si_uid
= from
->si_uid
;
851 to
->cpt_si_status
= from
->si_status
;
852 to
->cpt_si_utime
= from
->si_utime
;
853 to
->cpt_si_stime
= from
->si_stime
;
859 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
862 to
->cpt_si_band
= from
->si_band
;
863 to
->cpt_si_fd
= from
->si_fd
;
866 to
->cpt_si_pid
= from
->si_pid
;
867 to
->cpt_si_uid
= from
->si_uid
;
868 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
875 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
877 memset (to
, 0, sizeof (*to
));
879 to
->si_signo
= from
->si_signo
;
880 to
->si_errno
= from
->si_errno
;
881 to
->si_code
= from
->si_code
;
883 if (to
->si_code
== SI_TIMER
)
885 to
->si_timerid
= from
->cpt_si_timerid
;
886 to
->si_overrun
= from
->cpt_si_overrun
;
887 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
889 else if (to
->si_code
== SI_USER
)
891 to
->si_pid
= from
->cpt_si_pid
;
892 to
->si_uid
= from
->cpt_si_uid
;
894 else if (to
->si_code
< 0)
896 to
->si_pid
= from
->cpt_si_pid
;
897 to
->si_uid
= from
->cpt_si_uid
;
898 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
902 switch (to
->si_signo
)
905 to
->si_pid
= from
->cpt_si_pid
;
906 to
->si_uid
= from
->cpt_si_uid
;
907 to
->si_status
= from
->cpt_si_status
;
908 to
->si_utime
= from
->cpt_si_utime
;
909 to
->si_stime
= from
->cpt_si_stime
;
915 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
918 to
->si_band
= from
->cpt_si_band
;
919 to
->si_fd
= from
->cpt_si_fd
;
922 to
->si_pid
= from
->cpt_si_pid
;
923 to
->si_uid
= from
->cpt_si_uid
;
924 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
931 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
934 memset (to
, 0, sizeof (*to
));
936 to
->si_signo
= from
->si_signo
;
937 to
->si_errno
= from
->si_errno
;
938 to
->si_code
= from
->si_code
;
940 if (to
->si_code
== SI_TIMER
)
942 to
->cpt_si_timerid
= from
->si_timerid
;
943 to
->cpt_si_overrun
= from
->si_overrun
;
944 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
946 else if (to
->si_code
== SI_USER
)
948 to
->cpt_si_pid
= from
->si_pid
;
949 to
->cpt_si_uid
= from
->si_uid
;
951 else if (to
->si_code
< 0)
953 to
->cpt_si_pid
= from
->si_pid
;
954 to
->cpt_si_uid
= from
->si_uid
;
955 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
959 switch (to
->si_signo
)
962 to
->cpt_si_pid
= from
->si_pid
;
963 to
->cpt_si_uid
= from
->si_uid
;
964 to
->cpt_si_status
= from
->si_status
;
965 to
->cpt_si_utime
= from
->si_utime
;
966 to
->cpt_si_stime
= from
->si_stime
;
972 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
975 to
->cpt_si_band
= from
->si_band
;
976 to
->cpt_si_fd
= from
->si_fd
;
979 to
->cpt_si_pid
= from
->si_pid
;
980 to
->cpt_si_uid
= from
->si_uid
;
981 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
988 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
989 compat_x32_siginfo_t
*from
)
991 memset (to
, 0, sizeof (*to
));
993 to
->si_signo
= from
->si_signo
;
994 to
->si_errno
= from
->si_errno
;
995 to
->si_code
= from
->si_code
;
997 if (to
->si_code
== SI_TIMER
)
999 to
->si_timerid
= from
->cpt_si_timerid
;
1000 to
->si_overrun
= from
->cpt_si_overrun
;
1001 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1003 else if (to
->si_code
== SI_USER
)
1005 to
->si_pid
= from
->cpt_si_pid
;
1006 to
->si_uid
= from
->cpt_si_uid
;
1008 else if (to
->si_code
< 0)
1010 to
->si_pid
= from
->cpt_si_pid
;
1011 to
->si_uid
= from
->cpt_si_uid
;
1012 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1016 switch (to
->si_signo
)
1019 to
->si_pid
= from
->cpt_si_pid
;
1020 to
->si_uid
= from
->cpt_si_uid
;
1021 to
->si_status
= from
->cpt_si_status
;
1022 to
->si_utime
= from
->cpt_si_utime
;
1023 to
->si_stime
= from
->cpt_si_stime
;
1029 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1032 to
->si_band
= from
->cpt_si_band
;
1033 to
->si_fd
= from
->cpt_si_fd
;
1036 to
->si_pid
= from
->cpt_si_pid
;
1037 to
->si_uid
= from
->cpt_si_uid
;
1038 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1044 #endif /* __x86_64__ */
1046 /* Convert a native/host siginfo object, into/from the siginfo in the
1047 layout of the inferiors' architecture. Returns true if any
1048 conversion was done; false otherwise. If DIRECTION is 1, then copy
1049 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1053 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1056 unsigned int machine
;
1057 int tid
= lwpid_of (current_thread
);
1058 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1060 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1061 if (!is_64bit_tdesc ())
1063 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1066 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1068 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1072 /* No fixup for native x32 GDB. */
1073 else if (!is_elf64
&& sizeof (void *) == 8)
1075 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1078 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1081 siginfo_from_compat_x32_siginfo (native
,
1082 (struct compat_x32_siginfo
*) inf
);
1093 /* Format of XSAVE extended state is:
1096 fxsave_bytes[0..463]
1097 sw_usable_bytes[464..511]
1098 xstate_hdr_bytes[512..575]
1103 Same memory layout will be used for the coredump NT_X86_XSTATE
1104 representing the XSAVE extended state registers.
1106 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1107 extended state mask, which is the same as the extended control register
1108 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1109 together with the mask saved in the xstate_hdr_bytes to determine what
1110 states the processor/OS supports and what state, used or initialized,
1111 the process/thread is in. */
1112 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1114 /* Does the current host support the GETFPXREGS request? The header
1115 file may or may not define it, and even if it is defined, the
1116 kernel will return EIO if it's running on a pre-SSE processor. */
1117 int have_ptrace_getfpxregs
=
1118 #ifdef HAVE_PTRACE_GETFPXREGS
1125 /* Does the current host support PTRACE_GETREGSET? */
1126 static int have_ptrace_getregset
= -1;
1128 /* Get Linux/x86 target description from running target. */
1130 static const struct target_desc
*
1131 x86_linux_read_description (void)
1133 unsigned int machine
;
1137 static uint64_t xcr0
;
1138 struct regset_info
*regset
;
1140 tid
= lwpid_of (current_thread
);
1142 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1144 if (sizeof (void *) == 4)
1147 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1149 else if (machine
== EM_X86_64
)
1150 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1154 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1155 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1157 elf_fpxregset_t fpxregs
;
1159 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1161 have_ptrace_getfpxregs
= 0;
1162 have_ptrace_getregset
= 0;
1163 return tdesc_i386_mmx_linux
;
1166 have_ptrace_getfpxregs
= 1;
1172 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1174 /* Don't use XML. */
1176 if (machine
== EM_X86_64
)
1177 return tdesc_amd64_linux_no_xml
;
1180 return tdesc_i386_linux_no_xml
;
1183 if (have_ptrace_getregset
== -1)
1185 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1188 iov
.iov_base
= xstateregs
;
1189 iov
.iov_len
= sizeof (xstateregs
);
1191 /* Check if PTRACE_GETREGSET works. */
1192 if (ptrace (PTRACE_GETREGSET
, tid
,
1193 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1194 have_ptrace_getregset
= 0;
1197 have_ptrace_getregset
= 1;
1199 /* Get XCR0 from XSAVE extended state. */
1200 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1201 / sizeof (uint64_t))];
1203 /* Use PTRACE_GETREGSET if it is available. */
1204 for (regset
= x86_regsets
;
1205 regset
->fill_function
!= NULL
; regset
++)
1206 if (regset
->get_request
== PTRACE_GETREGSET
)
1207 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1208 else if (regset
->type
!= GENERAL_REGS
)
1213 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1214 xcr0_features
= (have_ptrace_getregset
1215 && (xcr0
& X86_XSTATE_ALL_MASK
));
1220 if (machine
== EM_X86_64
)
1227 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1229 case X86_XSTATE_AVX512_MASK
:
1230 return tdesc_amd64_avx512_linux
;
1232 case X86_XSTATE_MPX_MASK
:
1233 return tdesc_amd64_mpx_linux
;
1235 case X86_XSTATE_AVX_MASK
:
1236 return tdesc_amd64_avx_linux
;
1239 return tdesc_amd64_linux
;
1243 return tdesc_amd64_linux
;
1249 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1251 case X86_XSTATE_AVX512_MASK
:
1252 return tdesc_x32_avx512_linux
;
1254 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1255 case X86_XSTATE_AVX_MASK
:
1256 return tdesc_x32_avx_linux
;
1259 return tdesc_x32_linux
;
1263 return tdesc_x32_linux
;
1271 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1273 case (X86_XSTATE_AVX512_MASK
):
1274 return tdesc_i386_avx512_linux
;
1276 case (X86_XSTATE_MPX_MASK
):
1277 return tdesc_i386_mpx_linux
;
1279 case (X86_XSTATE_AVX_MASK
):
1280 return tdesc_i386_avx_linux
;
1283 return tdesc_i386_linux
;
1287 return tdesc_i386_linux
;
1290 gdb_assert_not_reached ("failed to return tdesc");
1293 /* Callback for find_inferior. Stops iteration when a thread with a
1294 given PID is found. */
1297 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1299 int pid
= *(int *) data
;
1301 return (ptid_get_pid (entry
->id
) == pid
);
1304 /* Callback for for_each_inferior. Calls the arch_setup routine for
1308 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1310 int pid
= ptid_get_pid (entry
->id
);
1312 /* Look up any thread of this processes. */
1314 = (struct thread_info
*) find_inferior (&all_threads
,
1315 same_process_callback
, &pid
);
1317 the_low_target
.arch_setup ();
1320 /* Update all the target description of all processes; a new GDB
1321 connected, and it may or not support xml target descriptions. */
1324 x86_linux_update_xmltarget (void)
1326 struct thread_info
*saved_thread
= current_thread
;
1328 /* Before changing the register cache's internal layout, flush the
1329 contents of the current valid caches back to the threads, and
1330 release the current regcache objects. */
1331 regcache_release ();
1333 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1335 current_thread
= saved_thread
;
1338 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1339 PTRACE_GETREGSET. */
1342 x86_linux_process_qsupported (const char *query
)
1344 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1345 with "i386" in qSupported query, it supports x86 XML target
1348 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1350 char *copy
= xstrdup (query
+ 13);
1353 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1355 if (strcmp (p
, "i386") == 0)
1365 x86_linux_update_xmltarget ();
1368 /* Common for x86/x86-64. */
1370 static struct regsets_info x86_regsets_info
=
1372 x86_regsets
, /* regsets */
1373 0, /* num_regsets */
1374 NULL
, /* disabled_regsets */
1378 static struct regs_info amd64_linux_regs_info
=
1380 NULL
, /* regset_bitmap */
1381 NULL
, /* usrregs_info */
1385 static struct usrregs_info i386_linux_usrregs_info
=
1391 static struct regs_info i386_linux_regs_info
=
1393 NULL
, /* regset_bitmap */
1394 &i386_linux_usrregs_info
,
1398 const struct regs_info
*
1399 x86_linux_regs_info (void)
1402 if (is_64bit_tdesc ())
1403 return &amd64_linux_regs_info
;
1406 return &i386_linux_regs_info
;
1409 /* Initialize the target description for the architecture of the
1413 x86_arch_setup (void)
1415 current_process ()->tdesc
= x86_linux_read_description ();
1419 x86_supports_tracepoints (void)
1425 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1427 write_inferior_memory (*to
, buf
, len
);
/* Decode OP, a string of whitespace-separated hex byte values, into
   BUF.  Return the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *start = buf;

  for (;;)
    {
      char *rest;
      unsigned long byte = strtoul (op, &rest, 16);

      /* strtoul consumed nothing: end of the opcode string.  */
      if (rest == op)
	break;

      *buf++ = byte;
      op = rest;
    }

  return buf - start;
}
1453 /* Build a jump pad that saves registers and calls a collection
1454 function. Writes a jump instruction to the jump pad to
1455 JJUMPAD_INSN. The caller is responsible to write it in at the
1456 tracepoint address. */
1459 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1460 CORE_ADDR collector
,
1463 CORE_ADDR
*jump_entry
,
1464 CORE_ADDR
*trampoline
,
1465 ULONGEST
*trampoline_size
,
1466 unsigned char *jjump_pad_insn
,
1467 ULONGEST
*jjump_pad_insn_size
,
1468 CORE_ADDR
*adjusted_insn_addr
,
1469 CORE_ADDR
*adjusted_insn_addr_end
,
1472 unsigned char buf
[40];
1476 CORE_ADDR buildaddr
= *jump_entry
;
1478 /* Build the jump pad. */
1480 /* First, do tracepoint data collection. Save registers. */
1482 /* Need to ensure stack pointer saved first. */
1483 buf
[i
++] = 0x54; /* push %rsp */
1484 buf
[i
++] = 0x55; /* push %rbp */
1485 buf
[i
++] = 0x57; /* push %rdi */
1486 buf
[i
++] = 0x56; /* push %rsi */
1487 buf
[i
++] = 0x52; /* push %rdx */
1488 buf
[i
++] = 0x51; /* push %rcx */
1489 buf
[i
++] = 0x53; /* push %rbx */
1490 buf
[i
++] = 0x50; /* push %rax */
1491 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1492 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1493 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1494 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1495 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1496 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1497 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1498 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1499 buf
[i
++] = 0x9c; /* pushfq */
1500 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1502 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1503 i
+= sizeof (unsigned long);
1504 buf
[i
++] = 0x57; /* push %rdi */
1505 append_insns (&buildaddr
, i
, buf
);
1507 /* Stack space for the collecting_t object. */
1509 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1510 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1511 memcpy (buf
+ i
, &tpoint
, 8);
1513 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1514 i
+= push_opcode (&buf
[i
],
1515 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1516 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1517 append_insns (&buildaddr
, i
, buf
);
1521 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1522 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1524 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1525 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1526 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1527 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1528 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1529 append_insns (&buildaddr
, i
, buf
);
1531 /* Set up the gdb_collect call. */
1532 /* At this point, (stack pointer + 0x18) is the base of our saved
1536 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1537 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1539 /* tpoint address may be 64-bit wide. */
1540 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1541 memcpy (buf
+ i
, &tpoint
, 8);
1543 append_insns (&buildaddr
, i
, buf
);
1545 /* The collector function being in the shared library, may be
1546 >31-bits away off the jump pad. */
1548 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1549 memcpy (buf
+ i
, &collector
, 8);
1551 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1552 append_insns (&buildaddr
, i
, buf
);
1554 /* Clear the spin-lock. */
1556 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1557 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1558 memcpy (buf
+ i
, &lockaddr
, 8);
1560 append_insns (&buildaddr
, i
, buf
);
1562 /* Remove stack that had been used for the collect_t object. */
1564 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1565 append_insns (&buildaddr
, i
, buf
);
1567 /* Restore register state. */
1569 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1573 buf
[i
++] = 0x9d; /* popfq */
1574 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1575 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1576 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1577 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1578 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1579 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1580 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1581 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1582 buf
[i
++] = 0x58; /* pop %rax */
1583 buf
[i
++] = 0x5b; /* pop %rbx */
1584 buf
[i
++] = 0x59; /* pop %rcx */
1585 buf
[i
++] = 0x5a; /* pop %rdx */
1586 buf
[i
++] = 0x5e; /* pop %rsi */
1587 buf
[i
++] = 0x5f; /* pop %rdi */
1588 buf
[i
++] = 0x5d; /* pop %rbp */
1589 buf
[i
++] = 0x5c; /* pop %rsp */
1590 append_insns (&buildaddr
, i
, buf
);
1592 /* Now, adjust the original instruction to execute in the jump
1594 *adjusted_insn_addr
= buildaddr
;
1595 relocate_instruction (&buildaddr
, tpaddr
);
1596 *adjusted_insn_addr_end
= buildaddr
;
1598 /* Finally, write a jump back to the program. */
1600 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1601 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1604 "E.Jump back from jump pad too far from tracepoint "
1605 "(offset 0x%" PRIx64
" > int32).", loffset
);
1609 offset
= (int) loffset
;
1610 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1611 memcpy (buf
+ 1, &offset
, 4);
1612 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1614 /* The jump pad is now built. Wire in a jump to our jump pad. This
1615 is always done last (by our caller actually), so that we can
1616 install fast tracepoints with threads running. This relies on
1617 the agent's atomic write support. */
1618 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1619 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1622 "E.Jump pad too far from tracepoint "
1623 "(offset 0x%" PRIx64
" > int32).", loffset
);
1627 offset
= (int) loffset
;
1629 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1630 memcpy (buf
+ 1, &offset
, 4);
1631 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1632 *jjump_pad_insn_size
= sizeof (jump_insn
);
1634 /* Return the end address of our pad. */
1635 *jump_entry
= buildaddr
;
1640 #endif /* __x86_64__ */
1642 /* Build a jump pad that saves registers and calls a collection
1643 function. Writes a jump instruction to the jump pad to
1644 JJUMPAD_INSN. The caller is responsible to write it in at the
1645 tracepoint address. */
1648 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1649 CORE_ADDR collector
,
1652 CORE_ADDR
*jump_entry
,
1653 CORE_ADDR
*trampoline
,
1654 ULONGEST
*trampoline_size
,
1655 unsigned char *jjump_pad_insn
,
1656 ULONGEST
*jjump_pad_insn_size
,
1657 CORE_ADDR
*adjusted_insn_addr
,
1658 CORE_ADDR
*adjusted_insn_addr_end
,
1661 unsigned char buf
[0x100];
1663 CORE_ADDR buildaddr
= *jump_entry
;
1665 /* Build the jump pad. */
1667 /* First, do tracepoint data collection. Save registers. */
1669 buf
[i
++] = 0x60; /* pushad */
1670 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1671 *((int *)(buf
+ i
)) = (int) tpaddr
;
1673 buf
[i
++] = 0x9c; /* pushf */
1674 buf
[i
++] = 0x1e; /* push %ds */
1675 buf
[i
++] = 0x06; /* push %es */
1676 buf
[i
++] = 0x0f; /* push %fs */
1678 buf
[i
++] = 0x0f; /* push %gs */
1680 buf
[i
++] = 0x16; /* push %ss */
1681 buf
[i
++] = 0x0e; /* push %cs */
1682 append_insns (&buildaddr
, i
, buf
);
1684 /* Stack space for the collecting_t object. */
1686 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1688 /* Build the object. */
1689 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1690 memcpy (buf
+ i
, &tpoint
, 4);
1692 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1694 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1695 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1696 append_insns (&buildaddr
, i
, buf
);
1698 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1699 If we cared for it, this could be using xchg alternatively. */
1702 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1703 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1705 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1707 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1708 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1709 append_insns (&buildaddr
, i
, buf
);
1712 /* Set up arguments to the gdb_collect call. */
1714 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1715 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1716 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1717 append_insns (&buildaddr
, i
, buf
);
1720 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1721 append_insns (&buildaddr
, i
, buf
);
1724 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1725 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1727 append_insns (&buildaddr
, i
, buf
);
1729 buf
[0] = 0xe8; /* call <reladdr> */
1730 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1731 memcpy (buf
+ 1, &offset
, 4);
1732 append_insns (&buildaddr
, 5, buf
);
1733 /* Clean up after the call. */
1734 buf
[0] = 0x83; /* add $0x8,%esp */
1737 append_insns (&buildaddr
, 3, buf
);
1740 /* Clear the spin-lock. This would need the LOCK prefix on older
1743 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1744 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1745 memcpy (buf
+ i
, &lockaddr
, 4);
1747 append_insns (&buildaddr
, i
, buf
);
1750 /* Remove stack that had been used for the collect_t object. */
1752 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1753 append_insns (&buildaddr
, i
, buf
);
1756 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1759 buf
[i
++] = 0x17; /* pop %ss */
1760 buf
[i
++] = 0x0f; /* pop %gs */
1762 buf
[i
++] = 0x0f; /* pop %fs */
1764 buf
[i
++] = 0x07; /* pop %es */
1765 buf
[i
++] = 0x1f; /* pop %ds */
1766 buf
[i
++] = 0x9d; /* popf */
1767 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1770 buf
[i
++] = 0x61; /* popad */
1771 append_insns (&buildaddr
, i
, buf
);
1773 /* Now, adjust the original instruction to execute in the jump
1775 *adjusted_insn_addr
= buildaddr
;
1776 relocate_instruction (&buildaddr
, tpaddr
);
1777 *adjusted_insn_addr_end
= buildaddr
;
1779 /* Write the jump back to the program. */
1780 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1781 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1782 memcpy (buf
+ 1, &offset
, 4);
1783 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1785 /* The jump pad is now built. Wire in a jump to our jump pad. This
1786 is always done last (by our caller actually), so that we can
1787 install fast tracepoints with threads running. This relies on
1788 the agent's atomic write support. */
1791 /* Create a trampoline. */
1792 *trampoline_size
= sizeof (jump_insn
);
1793 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1795 /* No trampoline space available. */
1797 "E.Cannot allocate trampoline space needed for fast "
1798 "tracepoints on 4-byte instructions.");
1802 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1803 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1804 memcpy (buf
+ 1, &offset
, 4);
1805 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1807 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1808 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1809 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1810 memcpy (buf
+ 2, &offset
, 2);
1811 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1812 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1816 /* Else use a 32-bit relative jump instruction. */
1817 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1818 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1819 memcpy (buf
+ 1, &offset
, 4);
1820 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1821 *jjump_pad_insn_size
= sizeof (jump_insn
);
1824 /* Return the end address of our pad. */
1825 *jump_entry
= buildaddr
;
1831 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1832 CORE_ADDR collector
,
1835 CORE_ADDR
*jump_entry
,
1836 CORE_ADDR
*trampoline
,
1837 ULONGEST
*trampoline_size
,
1838 unsigned char *jjump_pad_insn
,
1839 ULONGEST
*jjump_pad_insn_size
,
1840 CORE_ADDR
*adjusted_insn_addr
,
1841 CORE_ADDR
*adjusted_insn_addr_end
,
1845 if (is_64bit_tdesc ())
1846 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1847 collector
, lockaddr
,
1848 orig_size
, jump_entry
,
1849 trampoline
, trampoline_size
,
1851 jjump_pad_insn_size
,
1853 adjusted_insn_addr_end
,
1857 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1858 collector
, lockaddr
,
1859 orig_size
, jump_entry
,
1860 trampoline
, trampoline_size
,
1862 jjump_pad_insn_size
,
1864 adjusted_insn_addr_end
,
1868 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1872 x86_get_min_fast_tracepoint_insn_len (void)
1874 static int warned_about_fast_tracepoints
= 0;
1877 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1878 used for fast tracepoints. */
1879 if (is_64bit_tdesc ())
1883 if (agent_loaded_p ())
1885 char errbuf
[IPA_BUFSIZ
];
1889 /* On x86, if trampolines are available, then 4-byte jump instructions
1890 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1891 with a 4-byte offset are used instead. */
1892 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1896 /* GDB has no channel to explain to user why a shorter fast
1897 tracepoint is not possible, but at least make GDBserver
1898 mention that something has gone awry. */
1899 if (!warned_about_fast_tracepoints
)
1901 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1902 warned_about_fast_tracepoints
= 1;
1909 /* Indicate that the minimum length is currently unknown since the IPA
1910 has not loaded yet. */
1916 add_insns (unsigned char *start
, int len
)
1918 CORE_ADDR buildaddr
= current_insn_ptr
;
1921 debug_printf ("Adding %d bytes of insn at %s\n",
1922 len
, paddress (buildaddr
));
1924 append_insns (&buildaddr
, len
, start
);
1925 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant: the asm is assembled in .code32 mode so the same
   source file can emit i386 bytecode on a 64-bit build.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1969 amd64_emit_prologue (void)
1971 EMIT_ASM (amd64_prologue
,
1973 "movq %rsp,%rbp\n\t"
1974 "sub $0x20,%rsp\n\t"
1975 "movq %rdi,-8(%rbp)\n\t"
1976 "movq %rsi,-16(%rbp)");
1981 amd64_emit_epilogue (void)
1983 EMIT_ASM (amd64_epilogue
,
1984 "movq -16(%rbp),%rdi\n\t"
1985 "movq %rax,(%rdi)\n\t"
1992 amd64_emit_add (void)
1994 EMIT_ASM (amd64_add
,
1995 "add (%rsp),%rax\n\t"
1996 "lea 0x8(%rsp),%rsp");
2000 amd64_emit_sub (void)
2002 EMIT_ASM (amd64_sub
,
2003 "sub %rax,(%rsp)\n\t"
2008 amd64_emit_mul (void)
2014 amd64_emit_lsh (void)
2020 amd64_emit_rsh_signed (void)
2026 amd64_emit_rsh_unsigned (void)
2032 amd64_emit_ext (int arg
)
2037 EMIT_ASM (amd64_ext_8
,
2043 EMIT_ASM (amd64_ext_16
,
2048 EMIT_ASM (amd64_ext_32
,
2057 amd64_emit_log_not (void)
2059 EMIT_ASM (amd64_log_not
,
2060 "test %rax,%rax\n\t"
2066 amd64_emit_bit_and (void)
2068 EMIT_ASM (amd64_and
,
2069 "and (%rsp),%rax\n\t"
2070 "lea 0x8(%rsp),%rsp");
2074 amd64_emit_bit_or (void)
2077 "or (%rsp),%rax\n\t"
2078 "lea 0x8(%rsp),%rsp");
2082 amd64_emit_bit_xor (void)
2084 EMIT_ASM (amd64_xor
,
2085 "xor (%rsp),%rax\n\t"
2086 "lea 0x8(%rsp),%rsp");
2090 amd64_emit_bit_not (void)
2092 EMIT_ASM (amd64_bit_not
,
2093 "xorq $0xffffffffffffffff,%rax");
2097 amd64_emit_equal (void)
2099 EMIT_ASM (amd64_equal
,
2100 "cmp %rax,(%rsp)\n\t"
2101 "je .Lamd64_equal_true\n\t"
2103 "jmp .Lamd64_equal_end\n\t"
2104 ".Lamd64_equal_true:\n\t"
2106 ".Lamd64_equal_end:\n\t"
2107 "lea 0x8(%rsp),%rsp");
2111 amd64_emit_less_signed (void)
2113 EMIT_ASM (amd64_less_signed
,
2114 "cmp %rax,(%rsp)\n\t"
2115 "jl .Lamd64_less_signed_true\n\t"
2117 "jmp .Lamd64_less_signed_end\n\t"
2118 ".Lamd64_less_signed_true:\n\t"
2120 ".Lamd64_less_signed_end:\n\t"
2121 "lea 0x8(%rsp),%rsp");
2125 amd64_emit_less_unsigned (void)
2127 EMIT_ASM (amd64_less_unsigned
,
2128 "cmp %rax,(%rsp)\n\t"
2129 "jb .Lamd64_less_unsigned_true\n\t"
2131 "jmp .Lamd64_less_unsigned_end\n\t"
2132 ".Lamd64_less_unsigned_true:\n\t"
2134 ".Lamd64_less_unsigned_end:\n\t"
2135 "lea 0x8(%rsp),%rsp");
2139 amd64_emit_ref (int size
)
2144 EMIT_ASM (amd64_ref1
,
2148 EMIT_ASM (amd64_ref2
,
2152 EMIT_ASM (amd64_ref4
,
2153 "movl (%rax),%eax");
2156 EMIT_ASM (amd64_ref8
,
2157 "movq (%rax),%rax");
2163 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2165 EMIT_ASM (amd64_if_goto
,
2169 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2177 amd64_emit_goto (int *offset_p
, int *size_p
)
2179 EMIT_ASM (amd64_goto
,
2180 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2188 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2190 int diff
= (to
- (from
+ size
));
2191 unsigned char buf
[sizeof (int)];
2199 memcpy (buf
, &diff
, sizeof (int));
2200 write_inferior_memory (from
, buf
, sizeof (int));
2204 amd64_emit_const (LONGEST num
)
2206 unsigned char buf
[16];
2208 CORE_ADDR buildaddr
= current_insn_ptr
;
2211 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2212 memcpy (&buf
[i
], &num
, sizeof (num
));
2214 append_insns (&buildaddr
, i
, buf
);
2215 current_insn_ptr
= buildaddr
;
2219 amd64_emit_call (CORE_ADDR fn
)
2221 unsigned char buf
[16];
2223 CORE_ADDR buildaddr
;
2226 /* The destination function being in the shared library, may be
2227 >31-bits away off the compiled code pad. */
2229 buildaddr
= current_insn_ptr
;
2231 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2235 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2237 /* Offset is too large for a call. Use callq, but that requires
2238 a register, so avoid it if possible. Use r10, since it is
2239 call-clobbered, we don't have to push/pop it. */
2240 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2242 memcpy (buf
+ i
, &fn
, 8);
2244 buf
[i
++] = 0xff; /* callq *%r10 */
2249 int offset32
= offset64
; /* we know we can't overflow here. */
2250 memcpy (buf
+ i
, &offset32
, 4);
2254 append_insns (&buildaddr
, i
, buf
);
2255 current_insn_ptr
= buildaddr
;
2259 amd64_emit_reg (int reg
)
2261 unsigned char buf
[16];
2263 CORE_ADDR buildaddr
;
2265 /* Assume raw_regs is still in %rdi. */
2266 buildaddr
= current_insn_ptr
;
2268 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2269 memcpy (&buf
[i
], ®
, sizeof (reg
));
2271 append_insns (&buildaddr
, i
, buf
);
2272 current_insn_ptr
= buildaddr
;
2273 amd64_emit_call (get_raw_reg_func_addr ());
2277 amd64_emit_pop (void)
2279 EMIT_ASM (amd64_pop
,
2284 amd64_emit_stack_flush (void)
2286 EMIT_ASM (amd64_stack_flush
,
2291 amd64_emit_zero_ext (int arg
)
2296 EMIT_ASM (amd64_zero_ext_8
,
2300 EMIT_ASM (amd64_zero_ext_16
,
2301 "and $0xffff,%rax");
2304 EMIT_ASM (amd64_zero_ext_32
,
2305 "mov $0xffffffff,%rcx\n\t"
2314 amd64_emit_swap (void)
2316 EMIT_ASM (amd64_swap
,
2323 amd64_emit_stack_adjust (int n
)
2325 unsigned char buf
[16];
2327 CORE_ADDR buildaddr
= current_insn_ptr
;
2330 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2334 /* This only handles adjustments up to 16, but we don't expect any more. */
2336 append_insns (&buildaddr
, i
, buf
);
2337 current_insn_ptr
= buildaddr
;
2340 /* FN's prototype is `LONGEST(*fn)(int)'. */
2343 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2345 unsigned char buf
[16];
2347 CORE_ADDR buildaddr
;
2349 buildaddr
= current_insn_ptr
;
2351 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2352 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2354 append_insns (&buildaddr
, i
, buf
);
2355 current_insn_ptr
= buildaddr
;
2356 amd64_emit_call (fn
);
2359 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2362 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2364 unsigned char buf
[16];
2366 CORE_ADDR buildaddr
;
2368 buildaddr
= current_insn_ptr
;
2370 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2371 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2373 append_insns (&buildaddr
, i
, buf
);
2374 current_insn_ptr
= buildaddr
;
2375 EMIT_ASM (amd64_void_call_2_a
,
2376 /* Save away a copy of the stack top. */
2378 /* Also pass top as the second argument. */
2380 amd64_emit_call (fn
);
2381 EMIT_ASM (amd64_void_call_2_b
,
2382 /* Restore the stack top, %rax may have been trashed. */
2387 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2390 "cmp %rax,(%rsp)\n\t"
2391 "jne .Lamd64_eq_fallthru\n\t"
2392 "lea 0x8(%rsp),%rsp\n\t"
2394 /* jmp, but don't trust the assembler to choose the right jump */
2395 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2396 ".Lamd64_eq_fallthru:\n\t"
2397 "lea 0x8(%rsp),%rsp\n\t"
2407 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2410 "cmp %rax,(%rsp)\n\t"
2411 "je .Lamd64_ne_fallthru\n\t"
2412 "lea 0x8(%rsp),%rsp\n\t"
2414 /* jmp, but don't trust the assembler to choose the right jump */
2415 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2416 ".Lamd64_ne_fallthru:\n\t"
2417 "lea 0x8(%rsp),%rsp\n\t"
2427 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2430 "cmp %rax,(%rsp)\n\t"
2431 "jnl .Lamd64_lt_fallthru\n\t"
2432 "lea 0x8(%rsp),%rsp\n\t"
2434 /* jmp, but don't trust the assembler to choose the right jump */
2435 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2436 ".Lamd64_lt_fallthru:\n\t"
2437 "lea 0x8(%rsp),%rsp\n\t"
2447 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2450 "cmp %rax,(%rsp)\n\t"
2451 "jnle .Lamd64_le_fallthru\n\t"
2452 "lea 0x8(%rsp),%rsp\n\t"
2454 /* jmp, but don't trust the assembler to choose the right jump */
2455 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2456 ".Lamd64_le_fallthru:\n\t"
2457 "lea 0x8(%rsp),%rsp\n\t"
2467 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2470 "cmp %rax,(%rsp)\n\t"
2471 "jng .Lamd64_gt_fallthru\n\t"
2472 "lea 0x8(%rsp),%rsp\n\t"
2474 /* jmp, but don't trust the assembler to choose the right jump */
2475 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2476 ".Lamd64_gt_fallthru:\n\t"
2477 "lea 0x8(%rsp),%rsp\n\t"
2487 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2490 "cmp %rax,(%rsp)\n\t"
2491 "jnge .Lamd64_ge_fallthru\n\t"
2492 ".Lamd64_ge_jump:\n\t"
2493 "lea 0x8(%rsp),%rsp\n\t"
2495 /* jmp, but don't trust the assembler to choose the right jump */
2496 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2497 ".Lamd64_ge_fallthru:\n\t"
2498 "lea 0x8(%rsp),%rsp\n\t"
2507 struct emit_ops amd64_emit_ops
=
2509 amd64_emit_prologue
,
2510 amd64_emit_epilogue
,
2515 amd64_emit_rsh_signed
,
2516 amd64_emit_rsh_unsigned
,
2524 amd64_emit_less_signed
,
2525 amd64_emit_less_unsigned
,
2529 amd64_write_goto_address
,
2534 amd64_emit_stack_flush
,
2535 amd64_emit_zero_ext
,
2537 amd64_emit_stack_adjust
,
2538 amd64_emit_int_call_1
,
2539 amd64_emit_void_call_2
,
2548 #endif /* __x86_64__ */
2551 i386_emit_prologue (void)
2553 EMIT_ASM32 (i386_prologue
,
2557 /* At this point, the raw regs base address is at 8(%ebp), and the
2558 value pointer is at 12(%ebp). */
2562 i386_emit_epilogue (void)
2564 EMIT_ASM32 (i386_epilogue
,
2565 "mov 12(%ebp),%ecx\n\t"
2566 "mov %eax,(%ecx)\n\t"
2567 "mov %ebx,0x4(%ecx)\n\t"
2575 i386_emit_add (void)
2577 EMIT_ASM32 (i386_add
,
2578 "add (%esp),%eax\n\t"
2579 "adc 0x4(%esp),%ebx\n\t"
2580 "lea 0x8(%esp),%esp");
2584 i386_emit_sub (void)
2586 EMIT_ASM32 (i386_sub
,
2587 "subl %eax,(%esp)\n\t"
2588 "sbbl %ebx,4(%esp)\n\t"
2594 i386_emit_mul (void)
2600 i386_emit_lsh (void)
2606 i386_emit_rsh_signed (void)
2612 i386_emit_rsh_unsigned (void)
2618 i386_emit_ext (int arg
)
2623 EMIT_ASM32 (i386_ext_8
,
2626 "movl %eax,%ebx\n\t"
2630 EMIT_ASM32 (i386_ext_16
,
2632 "movl %eax,%ebx\n\t"
2636 EMIT_ASM32 (i386_ext_32
,
2637 "movl %eax,%ebx\n\t"
2646 i386_emit_log_not (void)
2648 EMIT_ASM32 (i386_log_not
,
2650 "test %eax,%eax\n\t"
2657 i386_emit_bit_and (void)
2659 EMIT_ASM32 (i386_and
,
2660 "and (%esp),%eax\n\t"
2661 "and 0x4(%esp),%ebx\n\t"
2662 "lea 0x8(%esp),%esp");
2666 i386_emit_bit_or (void)
2668 EMIT_ASM32 (i386_or
,
2669 "or (%esp),%eax\n\t"
2670 "or 0x4(%esp),%ebx\n\t"
2671 "lea 0x8(%esp),%esp");
2675 i386_emit_bit_xor (void)
2677 EMIT_ASM32 (i386_xor
,
2678 "xor (%esp),%eax\n\t"
2679 "xor 0x4(%esp),%ebx\n\t"
2680 "lea 0x8(%esp),%esp");
2684 i386_emit_bit_not (void)
2686 EMIT_ASM32 (i386_bit_not
,
2687 "xor $0xffffffff,%eax\n\t"
2688 "xor $0xffffffff,%ebx\n\t");
2692 i386_emit_equal (void)
2694 EMIT_ASM32 (i386_equal
,
2695 "cmpl %ebx,4(%esp)\n\t"
2696 "jne .Li386_equal_false\n\t"
2697 "cmpl %eax,(%esp)\n\t"
2698 "je .Li386_equal_true\n\t"
2699 ".Li386_equal_false:\n\t"
2701 "jmp .Li386_equal_end\n\t"
2702 ".Li386_equal_true:\n\t"
2704 ".Li386_equal_end:\n\t"
2706 "lea 0x8(%esp),%esp");
2710 i386_emit_less_signed (void)
2712 EMIT_ASM32 (i386_less_signed
,
2713 "cmpl %ebx,4(%esp)\n\t"
2714 "jl .Li386_less_signed_true\n\t"
2715 "jne .Li386_less_signed_false\n\t"
2716 "cmpl %eax,(%esp)\n\t"
2717 "jl .Li386_less_signed_true\n\t"
2718 ".Li386_less_signed_false:\n\t"
2720 "jmp .Li386_less_signed_end\n\t"
2721 ".Li386_less_signed_true:\n\t"
2723 ".Li386_less_signed_end:\n\t"
2725 "lea 0x8(%esp),%esp");
2729 i386_emit_less_unsigned (void)
2731 EMIT_ASM32 (i386_less_unsigned
,
2732 "cmpl %ebx,4(%esp)\n\t"
2733 "jb .Li386_less_unsigned_true\n\t"
2734 "jne .Li386_less_unsigned_false\n\t"
2735 "cmpl %eax,(%esp)\n\t"
2736 "jb .Li386_less_unsigned_true\n\t"
2737 ".Li386_less_unsigned_false:\n\t"
2739 "jmp .Li386_less_unsigned_end\n\t"
2740 ".Li386_less_unsigned_true:\n\t"
2742 ".Li386_less_unsigned_end:\n\t"
2744 "lea 0x8(%esp),%esp");
2748 i386_emit_ref (int size
)
2753 EMIT_ASM32 (i386_ref1
,
2757 EMIT_ASM32 (i386_ref2
,
2761 EMIT_ASM32 (i386_ref4
,
2762 "movl (%eax),%eax");
2765 EMIT_ASM32 (i386_ref8
,
2766 "movl 4(%eax),%ebx\n\t"
2767 "movl (%eax),%eax");
2773 i386_emit_if_goto (int *offset_p
, int *size_p
)
2775 EMIT_ASM32 (i386_if_goto
,
2781 /* Don't trust the assembler to choose the right jump */
2782 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2785 *offset_p
= 11; /* be sure that this matches the sequence above */
2791 i386_emit_goto (int *offset_p
, int *size_p
)
2793 EMIT_ASM32 (i386_goto
,
2794 /* Don't trust the assembler to choose the right jump */
2795 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2803 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2805 int diff
= (to
- (from
+ size
));
2806 unsigned char buf
[sizeof (int)];
2808 /* We're only doing 4-byte sizes at the moment. */
2815 memcpy (buf
, &diff
, sizeof (int));
2816 write_inferior_memory (from
, buf
, sizeof (int));
2820 i386_emit_const (LONGEST num
)
2822 unsigned char buf
[16];
2824 CORE_ADDR buildaddr
= current_insn_ptr
;
2827 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2828 lo
= num
& 0xffffffff;
2829 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2831 hi
= ((num
>> 32) & 0xffffffff);
2834 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2835 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2840 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2842 append_insns (&buildaddr
, i
, buf
);
2843 current_insn_ptr
= buildaddr
;
2847 i386_emit_call (CORE_ADDR fn
)
2849 unsigned char buf
[16];
2851 CORE_ADDR buildaddr
;
2853 buildaddr
= current_insn_ptr
;
2855 buf
[i
++] = 0xe8; /* call <reladdr> */
2856 offset
= ((int) fn
) - (buildaddr
+ 5);
2857 memcpy (buf
+ 1, &offset
, 4);
2858 append_insns (&buildaddr
, 5, buf
);
2859 current_insn_ptr
= buildaddr
;
2863 i386_emit_reg (int reg
)
2865 unsigned char buf
[16];
2867 CORE_ADDR buildaddr
;
2869 EMIT_ASM32 (i386_reg_a
,
2871 buildaddr
= current_insn_ptr
;
2873 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2874 memcpy (&buf
[i
], ®
, sizeof (reg
));
2876 append_insns (&buildaddr
, i
, buf
);
2877 current_insn_ptr
= buildaddr
;
2878 EMIT_ASM32 (i386_reg_b
,
2879 "mov %eax,4(%esp)\n\t"
2880 "mov 8(%ebp),%eax\n\t"
2882 i386_emit_call (get_raw_reg_func_addr ());
2883 EMIT_ASM32 (i386_reg_c
,
2885 "lea 0x8(%esp),%esp");
2889 i386_emit_pop (void)
2891 EMIT_ASM32 (i386_pop
,
2897 i386_emit_stack_flush (void)
2899 EMIT_ASM32 (i386_stack_flush
,
2905 i386_emit_zero_ext (int arg
)
2910 EMIT_ASM32 (i386_zero_ext_8
,
2911 "and $0xff,%eax\n\t"
2915 EMIT_ASM32 (i386_zero_ext_16
,
2916 "and $0xffff,%eax\n\t"
2920 EMIT_ASM32 (i386_zero_ext_32
,
2929 i386_emit_swap (void)
2931 EMIT_ASM32 (i386_swap
,
2941 i386_emit_stack_adjust (int n
)
2943 unsigned char buf
[16];
2945 CORE_ADDR buildaddr
= current_insn_ptr
;
2948 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2952 append_insns (&buildaddr
, i
, buf
);
2953 current_insn_ptr
= buildaddr
;
2956 /* FN's prototype is `LONGEST(*fn)(int)'. */
2959 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2961 unsigned char buf
[16];
2963 CORE_ADDR buildaddr
;
2965 EMIT_ASM32 (i386_int_call_1_a
,
2966 /* Reserve a bit of stack space. */
2968 /* Put the one argument on the stack. */
2969 buildaddr
= current_insn_ptr
;
2971 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2974 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2976 append_insns (&buildaddr
, i
, buf
);
2977 current_insn_ptr
= buildaddr
;
2978 i386_emit_call (fn
);
2979 EMIT_ASM32 (i386_int_call_1_c
,
2981 "lea 0x8(%esp),%esp");
2984 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2987 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2989 unsigned char buf
[16];
2991 CORE_ADDR buildaddr
;
2993 EMIT_ASM32 (i386_void_call_2_a
,
2994 /* Preserve %eax only; we don't have to worry about %ebx. */
2996 /* Reserve a bit of stack space for arguments. */
2997 "sub $0x10,%esp\n\t"
2998 /* Copy "top" to the second argument position. (Note that
2999 we can't assume function won't scribble on its
3000 arguments, so don't try to restore from this.) */
3001 "mov %eax,4(%esp)\n\t"
3002 "mov %ebx,8(%esp)");
3003 /* Put the first argument on the stack. */
3004 buildaddr
= current_insn_ptr
;
3006 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3009 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3011 append_insns (&buildaddr
, i
, buf
);
3012 current_insn_ptr
= buildaddr
;
3013 i386_emit_call (fn
);
3014 EMIT_ASM32 (i386_void_call_2_b
,
3015 "lea 0x10(%esp),%esp\n\t"
3016 /* Restore original stack top. */
3022 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3025 /* Check low half first, more likely to be decider */
3026 "cmpl %eax,(%esp)\n\t"
3027 "jne .Leq_fallthru\n\t"
3028 "cmpl %ebx,4(%esp)\n\t"
3029 "jne .Leq_fallthru\n\t"
3030 "lea 0x8(%esp),%esp\n\t"
3033 /* jmp, but don't trust the assembler to choose the right jump */
3034 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3035 ".Leq_fallthru:\n\t"
3036 "lea 0x8(%esp),%esp\n\t"
3047 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3050 /* Check low half first, more likely to be decider */
3051 "cmpl %eax,(%esp)\n\t"
3053 "cmpl %ebx,4(%esp)\n\t"
3054 "je .Lne_fallthru\n\t"
3056 "lea 0x8(%esp),%esp\n\t"
3059 /* jmp, but don't trust the assembler to choose the right jump */
3060 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3061 ".Lne_fallthru:\n\t"
3062 "lea 0x8(%esp),%esp\n\t"
3073 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3076 "cmpl %ebx,4(%esp)\n\t"
3078 "jne .Llt_fallthru\n\t"
3079 "cmpl %eax,(%esp)\n\t"
3080 "jnl .Llt_fallthru\n\t"
3082 "lea 0x8(%esp),%esp\n\t"
3085 /* jmp, but don't trust the assembler to choose the right jump */
3086 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3087 ".Llt_fallthru:\n\t"
3088 "lea 0x8(%esp),%esp\n\t"
3099 i386_emit_le_goto (int *offset_p
, int *size_p
)
3102 "cmpl %ebx,4(%esp)\n\t"
3104 "jne .Lle_fallthru\n\t"
3105 "cmpl %eax,(%esp)\n\t"
3106 "jnle .Lle_fallthru\n\t"
3108 "lea 0x8(%esp),%esp\n\t"
3111 /* jmp, but don't trust the assembler to choose the right jump */
3112 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3113 ".Lle_fallthru:\n\t"
3114 "lea 0x8(%esp),%esp\n\t"
3125 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3128 "cmpl %ebx,4(%esp)\n\t"
3130 "jne .Lgt_fallthru\n\t"
3131 "cmpl %eax,(%esp)\n\t"
3132 "jng .Lgt_fallthru\n\t"
3134 "lea 0x8(%esp),%esp\n\t"
3137 /* jmp, but don't trust the assembler to choose the right jump */
3138 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3139 ".Lgt_fallthru:\n\t"
3140 "lea 0x8(%esp),%esp\n\t"
3151 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3154 "cmpl %ebx,4(%esp)\n\t"
3156 "jne .Lge_fallthru\n\t"
3157 "cmpl %eax,(%esp)\n\t"
3158 "jnge .Lge_fallthru\n\t"
3160 "lea 0x8(%esp),%esp\n\t"
3163 /* jmp, but don't trust the assembler to choose the right jump */
3164 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3165 ".Lge_fallthru:\n\t"
3166 "lea 0x8(%esp),%esp\n\t"
3176 struct emit_ops i386_emit_ops
=
3184 i386_emit_rsh_signed
,
3185 i386_emit_rsh_unsigned
,
3193 i386_emit_less_signed
,
3194 i386_emit_less_unsigned
,
3198 i386_write_goto_address
,
3203 i386_emit_stack_flush
,
3206 i386_emit_stack_adjust
,
3207 i386_emit_int_call_1
,
3208 i386_emit_void_call_2
,
3218 static struct emit_ops
*
3222 if (is_64bit_tdesc ())
3223 return &amd64_emit_ops
;
3226 return &i386_emit_ops
;
/* Range stepping (vCont;r) is supported on x86/x86-64.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3235 /* This is initialized assuming an amd64 target.
3236 x86_arch_setup will correct it for i386 or amd64 targets. */
3238 struct linux_target_ops the_low_target
=
3241 x86_linux_regs_info
,
3242 x86_cannot_fetch_register
,
3243 x86_cannot_store_register
,
3244 NULL
, /* fetch_register */
3252 x86_supports_z_point_type
,
3255 x86_stopped_by_watchpoint
,
3256 x86_stopped_data_address
,
3257 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3258 native i386 case (no registers smaller than an xfer unit), and are not
3259 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3262 /* need to fix up i386 siginfo if host is amd64 */
3264 x86_linux_new_process
,
3265 x86_linux_new_thread
,
3266 x86_linux_prepare_to_resume
,
3267 x86_linux_process_qsupported
,
3268 x86_supports_tracepoints
,
3269 x86_get_thread_area
,
3270 x86_install_fast_tracepoint_jump_pad
,
3272 x86_get_min_fast_tracepoint_insn_len
,
3273 x86_supports_range_stepping
,
3277 initialize_low_arch (void)
3279 /* Initialize the Linux target descriptions. */
3281 init_registers_amd64_linux ();
3282 init_registers_amd64_avx_linux ();
3283 init_registers_amd64_avx512_linux ();
3284 init_registers_amd64_mpx_linux ();
3286 init_registers_x32_linux ();
3287 init_registers_x32_avx_linux ();
3288 init_registers_x32_avx512_linux ();
3290 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3291 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3292 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3294 init_registers_i386_linux ();
3295 init_registers_i386_mmx_linux ();
3296 init_registers_i386_avx_linux ();
3297 init_registers_i386_avx512_linux ();
3298 init_registers_i386_mpx_linux ();
3300 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3301 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3302 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3304 initialize_regsets_info (&x86_regsets_info
);