1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
42 #include "nat/x86-linux-dregs.h"
#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;
#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

/* Fallback descriptions used when the peer GDB does not support XML
   target descriptions (see xmltarget_* strings below).  */
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

/* 32-bit displacement jump (e9 rel32) and 16-bit displacement jump
   (66 e9 rel16), used when building fast tracepoint jump pads.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

/* Fallbacks for ptrace requests that older kernel headers may lack.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
150 /* Per-process arch-specific data we want to keep. */
152 struct arch_process_info
154 struct x86_debug_reg_state debug_reg_state
;
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout.
161 Note that the transfer layout uses 64-bit regs. */
162 static /*const*/ int i386_regmap
[] =
164 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
165 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
166 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
167 DS
* 8, ES
* 8, FS
* 8, GS
* 8
170 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
172 /* So code below doesn't have to care, i386 or amd64. */
173 #define ORIG_EAX ORIG_RAX
176 static const int x86_64_regmap
[] =
178 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
179 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
180 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
181 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
182 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
183 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1, -1, -1, -1, -1, -1, -1, -1,
188 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
191 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
192 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
193 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
195 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1
203 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
204 #define X86_64_USER_REGS (GS + 1)
206 #else /* ! __x86_64__ */
208 /* Mapping between the general-purpose registers in `struct user'
209 format and GDB's register array layout. */
210 static /*const*/ int i386_regmap
[] =
212 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
213 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
214 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
215 DS
* 4, ES
* 4, FS
* 4, GS
* 4
218 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
226 /* Returns true if the current inferior belongs to a x86-64 process,
230 is_64bit_tdesc (void)
232 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
234 return register_size (regcache
->tdesc
, 0) == 8;
240 /* Called by libthread_db. */
243 ps_get_thread_area (const struct ps_prochandle
*ph
,
244 lwpid_t lwpid
, int idx
, void **base
)
247 int use_64bit
= is_64bit_tdesc ();
254 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
258 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
269 unsigned int desc
[4];
271 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
272 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
275 /* Ensure we properly extend the value to 64-bits for x86_64. */
276 *base
= (void *) (uintptr_t) desc
[1];
281 /* Get the thread area address. This is used to recognize which
282 thread is which when tracing with the in-process agent library. We
283 don't read anything from the address, and treat it as opaque; it's
284 the address itself that we assume is unique per-thread. */
287 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
290 int use_64bit
= is_64bit_tdesc ();
295 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
297 *addr
= (CORE_ADDR
) (uintptr_t) base
;
306 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
307 struct thread_info
*thr
= get_lwp_thread (lwp
);
308 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
309 unsigned int desc
[4];
311 const int reg_thread_area
= 3; /* bits to scale down register value. */
314 collect_register_by_name (regcache
, "gs", &gs
);
316 idx
= gs
>> reg_thread_area
;
318 if (ptrace (PTRACE_GET_THREAD_AREA
,
320 (void *) (long) idx
, (unsigned long) &desc
) < 0)
331 x86_cannot_store_register (int regno
)
334 if (is_64bit_tdesc ())
338 return regno
>= I386_NUM_REGS
;
342 x86_cannot_fetch_register (int regno
)
345 if (is_64bit_tdesc ())
349 return regno
>= I386_NUM_REGS
;
353 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
358 if (register_size (regcache
->tdesc
, 0) == 8)
360 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
361 if (x86_64_regmap
[i
] != -1)
362 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
366 /* 32-bit inferior registers need to be zero-extended.
367 Callers would read uninitialized memory otherwise. */
368 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
371 for (i
= 0; i
< I386_NUM_REGS
; i
++)
372 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
374 collect_register_by_name (regcache
, "orig_eax",
375 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
379 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
384 if (register_size (regcache
->tdesc
, 0) == 8)
386 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
387 if (x86_64_regmap
[i
] != -1)
388 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
393 for (i
= 0; i
< I386_NUM_REGS
; i
++)
394 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
396 supply_register_by_name (regcache
, "orig_eax",
397 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Copy the FP registers from REGCACHE into BUF, in fxsave format on
   64-bit and classic fsave format on 32-bit.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Copy the FP registers from BUF into REGCACHE; inverse of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
#ifndef __x86_64__

/* Copy the extended FP/SSE registers (fxsave layout) from REGCACHE
   into BUF.  Only needed on 32-bit hosts; 64-bit always uses
   fxsave via the plain fpregset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Inverse of x86_fill_fpxregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
/* Copy the extended state (XSAVE) registers from REGCACHE into BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Copy the extended state (XSAVE) registers from BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
448 /* ??? The non-biarch i386 case stores all the i387 regs twice.
449 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
450 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
451 doesn't work. IWBN to avoid the duplication in the case where it
452 does work. Maybe the arch_setup routine could check whether it works
453 and update the supported regsets accordingly. */
455 static struct regset_info x86_regsets
[] =
457 #ifdef HAVE_PTRACE_GETREGS
458 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
460 x86_fill_gregset
, x86_store_gregset
},
461 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
462 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
464 # ifdef HAVE_PTRACE_GETFPXREGS
465 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
467 x86_fill_fpxregset
, x86_store_fpxregset
},
470 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
472 x86_fill_fpregset
, x86_store_fpregset
},
473 #endif /* HAVE_PTRACE_GETREGS */
474 { 0, 0, 0, -1, -1, NULL
, NULL
}
478 x86_get_pc (struct regcache
*regcache
)
480 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
485 collect_register_by_name (regcache
, "rip", &pc
);
486 return (CORE_ADDR
) pc
;
491 collect_register_by_name (regcache
, "eip", &pc
);
492 return (CORE_ADDR
) pc
;
497 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
499 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
503 unsigned long newpc
= pc
;
504 supply_register_by_name (regcache
, "rip", &newpc
);
508 unsigned int newpc
= pc
;
509 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3 (0xCC).  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
517 x86_breakpoint_at (CORE_ADDR pc
)
521 (*the_target
->read_memory
) (pc
, &c
, 1);
528 /* Low-level function vector. */
529 struct x86_dr_low_type x86_dr_low
=
531 x86_linux_dr_set_control
,
532 x86_linux_dr_set_addr
,
533 x86_linux_dr_get_addr
,
534 x86_linux_dr_get_status
,
535 x86_linux_dr_get_control
,
539 /* Breakpoint/Watchpoint support. */
542 x86_supports_z_point_type (char z_type
)
548 case Z_PACKET_WRITE_WP
:
549 case Z_PACKET_ACCESS_WP
:
557 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
558 int size
, struct raw_breakpoint
*bp
)
560 struct process_info
*proc
= current_process ();
564 case raw_bkpt_type_sw
:
565 return insert_memory_breakpoint (bp
);
567 case raw_bkpt_type_hw
:
568 case raw_bkpt_type_write_wp
:
569 case raw_bkpt_type_access_wp
:
571 enum target_hw_bp_type hw_type
572 = raw_bkpt_type_to_target_hw_bp_type (type
);
573 struct x86_debug_reg_state
*state
574 = &proc
->priv
->arch_private
->debug_reg_state
;
576 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
586 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
587 int size
, struct raw_breakpoint
*bp
)
589 struct process_info
*proc
= current_process ();
593 case raw_bkpt_type_sw
:
594 return remove_memory_breakpoint (bp
);
596 case raw_bkpt_type_hw
:
597 case raw_bkpt_type_write_wp
:
598 case raw_bkpt_type_access_wp
:
600 enum target_hw_bp_type hw_type
601 = raw_bkpt_type_to_target_hw_bp_type (type
);
602 struct x86_debug_reg_state
*state
603 = &proc
->priv
->arch_private
->debug_reg_state
;
605 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
614 x86_stopped_by_watchpoint (void)
616 struct process_info
*proc
= current_process ();
617 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
621 x86_stopped_data_address (void)
623 struct process_info
*proc
= current_process ();
625 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
631 /* Called when a new process is created. */
633 static struct arch_process_info
*
634 x86_linux_new_process (void)
636 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
638 x86_low_init_dregs (&info
->debug_reg_state
);
643 /* See nat/x86-dregs.h. */
645 struct x86_debug_reg_state
*
646 x86_debug_reg_state (pid_t pid
)
648 struct process_info
*proc
= find_process_pid (pid
);
650 return &proc
->priv
->arch_private
->debug_reg_state
;
653 /* When GDBSERVER is built as a 64-bit application on linux, the
654 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
655 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
656 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
657 conversion in-place ourselves. */
659 /* These types below (compat_*) define a siginfo type that is layout
660 compatible with the siginfo type exported by the 32-bit userspace
665 typedef int compat_int_t
;
666 typedef unsigned int compat_uptr_t
;
668 typedef int compat_time_t
;
669 typedef int compat_timer_t
;
670 typedef int compat_clock_t
;
672 struct compat_timeval
674 compat_time_t tv_sec
;
678 typedef union compat_sigval
680 compat_int_t sival_int
;
681 compat_uptr_t sival_ptr
;
684 typedef struct compat_siginfo
692 int _pad
[((128 / sizeof (int)) - 3)];
701 /* POSIX.1b timers */
706 compat_sigval_t _sigval
;
709 /* POSIX.1b signals */
714 compat_sigval_t _sigval
;
723 compat_clock_t _utime
;
724 compat_clock_t _stime
;
727 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
742 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
743 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
745 typedef struct compat_x32_siginfo
753 int _pad
[((128 / sizeof (int)) - 3)];
762 /* POSIX.1b timers */
767 compat_sigval_t _sigval
;
770 /* POSIX.1b signals */
775 compat_sigval_t _sigval
;
784 compat_x32_clock_t _utime
;
785 compat_x32_clock_t _stime
;
788 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
801 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
/* Accessor shorthands into the compat union members.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
825 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
827 memset (to
, 0, sizeof (*to
));
829 to
->si_signo
= from
->si_signo
;
830 to
->si_errno
= from
->si_errno
;
831 to
->si_code
= from
->si_code
;
833 if (to
->si_code
== SI_TIMER
)
835 to
->cpt_si_timerid
= from
->si_timerid
;
836 to
->cpt_si_overrun
= from
->si_overrun
;
837 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
839 else if (to
->si_code
== SI_USER
)
841 to
->cpt_si_pid
= from
->si_pid
;
842 to
->cpt_si_uid
= from
->si_uid
;
844 else if (to
->si_code
< 0)
846 to
->cpt_si_pid
= from
->si_pid
;
847 to
->cpt_si_uid
= from
->si_uid
;
848 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
852 switch (to
->si_signo
)
855 to
->cpt_si_pid
= from
->si_pid
;
856 to
->cpt_si_uid
= from
->si_uid
;
857 to
->cpt_si_status
= from
->si_status
;
858 to
->cpt_si_utime
= from
->si_utime
;
859 to
->cpt_si_stime
= from
->si_stime
;
865 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
868 to
->cpt_si_band
= from
->si_band
;
869 to
->cpt_si_fd
= from
->si_fd
;
872 to
->cpt_si_pid
= from
->si_pid
;
873 to
->cpt_si_uid
= from
->si_uid
;
874 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
881 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
883 memset (to
, 0, sizeof (*to
));
885 to
->si_signo
= from
->si_signo
;
886 to
->si_errno
= from
->si_errno
;
887 to
->si_code
= from
->si_code
;
889 if (to
->si_code
== SI_TIMER
)
891 to
->si_timerid
= from
->cpt_si_timerid
;
892 to
->si_overrun
= from
->cpt_si_overrun
;
893 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
895 else if (to
->si_code
== SI_USER
)
897 to
->si_pid
= from
->cpt_si_pid
;
898 to
->si_uid
= from
->cpt_si_uid
;
900 else if (to
->si_code
< 0)
902 to
->si_pid
= from
->cpt_si_pid
;
903 to
->si_uid
= from
->cpt_si_uid
;
904 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
908 switch (to
->si_signo
)
911 to
->si_pid
= from
->cpt_si_pid
;
912 to
->si_uid
= from
->cpt_si_uid
;
913 to
->si_status
= from
->cpt_si_status
;
914 to
->si_utime
= from
->cpt_si_utime
;
915 to
->si_stime
= from
->cpt_si_stime
;
921 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
924 to
->si_band
= from
->cpt_si_band
;
925 to
->si_fd
= from
->cpt_si_fd
;
928 to
->si_pid
= from
->cpt_si_pid
;
929 to
->si_uid
= from
->cpt_si_uid
;
930 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
937 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
940 memset (to
, 0, sizeof (*to
));
942 to
->si_signo
= from
->si_signo
;
943 to
->si_errno
= from
->si_errno
;
944 to
->si_code
= from
->si_code
;
946 if (to
->si_code
== SI_TIMER
)
948 to
->cpt_si_timerid
= from
->si_timerid
;
949 to
->cpt_si_overrun
= from
->si_overrun
;
950 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
952 else if (to
->si_code
== SI_USER
)
954 to
->cpt_si_pid
= from
->si_pid
;
955 to
->cpt_si_uid
= from
->si_uid
;
957 else if (to
->si_code
< 0)
959 to
->cpt_si_pid
= from
->si_pid
;
960 to
->cpt_si_uid
= from
->si_uid
;
961 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
965 switch (to
->si_signo
)
968 to
->cpt_si_pid
= from
->si_pid
;
969 to
->cpt_si_uid
= from
->si_uid
;
970 to
->cpt_si_status
= from
->si_status
;
971 to
->cpt_si_utime
= from
->si_utime
;
972 to
->cpt_si_stime
= from
->si_stime
;
978 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
981 to
->cpt_si_band
= from
->si_band
;
982 to
->cpt_si_fd
= from
->si_fd
;
985 to
->cpt_si_pid
= from
->si_pid
;
986 to
->cpt_si_uid
= from
->si_uid
;
987 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
994 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
995 compat_x32_siginfo_t
*from
)
997 memset (to
, 0, sizeof (*to
));
999 to
->si_signo
= from
->si_signo
;
1000 to
->si_errno
= from
->si_errno
;
1001 to
->si_code
= from
->si_code
;
1003 if (to
->si_code
== SI_TIMER
)
1005 to
->si_timerid
= from
->cpt_si_timerid
;
1006 to
->si_overrun
= from
->cpt_si_overrun
;
1007 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1009 else if (to
->si_code
== SI_USER
)
1011 to
->si_pid
= from
->cpt_si_pid
;
1012 to
->si_uid
= from
->cpt_si_uid
;
1014 else if (to
->si_code
< 0)
1016 to
->si_pid
= from
->cpt_si_pid
;
1017 to
->si_uid
= from
->cpt_si_uid
;
1018 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1022 switch (to
->si_signo
)
1025 to
->si_pid
= from
->cpt_si_pid
;
1026 to
->si_uid
= from
->cpt_si_uid
;
1027 to
->si_status
= from
->cpt_si_status
;
1028 to
->si_utime
= from
->cpt_si_utime
;
1029 to
->si_stime
= from
->cpt_si_stime
;
1035 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1038 to
->si_band
= from
->cpt_si_band
;
1039 to
->si_fd
= from
->cpt_si_fd
;
1042 to
->si_pid
= from
->cpt_si_pid
;
1043 to
->si_uid
= from
->cpt_si_uid
;
1044 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1050 #endif /* __x86_64__ */
1052 /* Convert a native/host siginfo object, into/from the siginfo in the
1053 layout of the inferiors' architecture. Returns true if any
1054 conversion was done; false otherwise. If DIRECTION is 1, then copy
1055 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1059 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1062 unsigned int machine
;
1063 int tid
= lwpid_of (current_thread
);
1064 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1066 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1067 if (!is_64bit_tdesc ())
1069 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1072 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1074 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1078 /* No fixup for native x32 GDB. */
1079 else if (!is_elf64
&& sizeof (void *) == 8)
1081 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1084 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1087 siginfo_from_compat_x32_siginfo (native
,
1088 (struct compat_x32_siginfo
*) inf
);
1099 /* Format of XSAVE extended state is:
1102 fxsave_bytes[0..463]
1103 sw_usable_bytes[464..511]
1104 xstate_hdr_bytes[512..575]
1109 Same memory layout will be used for the coredump NT_X86_XSTATE
1110 representing the XSAVE extended state registers.
1112 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1113 extended state mask, which is the same as the extended control register
1114 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1115 together with the mask saved in the xstate_hdr_bytes to determine what
1116 states the processor/OS supports and what state, used or initialized,
1117 the process/thread is in. */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1	/* Unknown until probed.  */
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 = unknown.  */
static int have_ptrace_getregset = -1;
1134 /* Get Linux/x86 target description from running target. */
1136 static const struct target_desc
*
1137 x86_linux_read_description (void)
1139 unsigned int machine
;
1143 static uint64_t xcr0
;
1144 struct regset_info
*regset
;
1146 tid
= lwpid_of (current_thread
);
1148 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1150 if (sizeof (void *) == 4)
1153 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1155 else if (machine
== EM_X86_64
)
1156 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1160 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1161 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1163 elf_fpxregset_t fpxregs
;
1165 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1167 have_ptrace_getfpxregs
= 0;
1168 have_ptrace_getregset
= 0;
1169 return tdesc_i386_mmx_linux
;
1172 have_ptrace_getfpxregs
= 1;
1178 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1180 /* Don't use XML. */
1182 if (machine
== EM_X86_64
)
1183 return tdesc_amd64_linux_no_xml
;
1186 return tdesc_i386_linux_no_xml
;
1189 if (have_ptrace_getregset
== -1)
1191 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1194 iov
.iov_base
= xstateregs
;
1195 iov
.iov_len
= sizeof (xstateregs
);
1197 /* Check if PTRACE_GETREGSET works. */
1198 if (ptrace (PTRACE_GETREGSET
, tid
,
1199 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1200 have_ptrace_getregset
= 0;
1203 have_ptrace_getregset
= 1;
1205 /* Get XCR0 from XSAVE extended state. */
1206 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1207 / sizeof (uint64_t))];
1209 /* Use PTRACE_GETREGSET if it is available. */
1210 for (regset
= x86_regsets
;
1211 regset
->fill_function
!= NULL
; regset
++)
1212 if (regset
->get_request
== PTRACE_GETREGSET
)
1213 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1214 else if (regset
->type
!= GENERAL_REGS
)
1219 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1220 xcr0_features
= (have_ptrace_getregset
1221 && (xcr0
& X86_XSTATE_ALL_MASK
));
1226 if (machine
== EM_X86_64
)
1233 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1235 case X86_XSTATE_AVX512_MASK
:
1236 return tdesc_amd64_avx512_linux
;
1238 case X86_XSTATE_MPX_MASK
:
1239 return tdesc_amd64_mpx_linux
;
1241 case X86_XSTATE_AVX_MASK
:
1242 return tdesc_amd64_avx_linux
;
1245 return tdesc_amd64_linux
;
1249 return tdesc_amd64_linux
;
1255 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1257 case X86_XSTATE_AVX512_MASK
:
1258 return tdesc_x32_avx512_linux
;
1260 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1261 case X86_XSTATE_AVX_MASK
:
1262 return tdesc_x32_avx_linux
;
1265 return tdesc_x32_linux
;
1269 return tdesc_x32_linux
;
1277 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1279 case (X86_XSTATE_AVX512_MASK
):
1280 return tdesc_i386_avx512_linux
;
1282 case (X86_XSTATE_MPX_MASK
):
1283 return tdesc_i386_mpx_linux
;
1285 case (X86_XSTATE_AVX_MASK
):
1286 return tdesc_i386_avx_linux
;
1289 return tdesc_i386_linux
;
1293 return tdesc_i386_linux
;
1296 gdb_assert_not_reached ("failed to return tdesc");
1299 /* Callback for find_inferior. Stops iteration when a thread with a
1300 given PID is found. */
1303 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1305 int pid
= *(int *) data
;
1307 return (ptid_get_pid (entry
->id
) == pid
);
1310 /* Callback for for_each_inferior. Calls the arch_setup routine for
1314 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1316 int pid
= ptid_get_pid (entry
->id
);
1318 /* Look up any thread of this processes. */
1320 = (struct thread_info
*) find_inferior (&all_threads
,
1321 same_process_callback
, &pid
);
1323 the_low_target
.arch_setup ();
1326 /* Update all the target description of all processes; a new GDB
1327 connected, and it may or not support xml target descriptions. */
1330 x86_linux_update_xmltarget (void)
1332 struct thread_info
*saved_thread
= current_thread
;
1334 /* Before changing the register cache's internal layout, flush the
1335 contents of the current valid caches back to the threads, and
1336 release the current regcache objects. */
1337 regcache_release ();
1339 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1341 current_thread
= saved_thread
;
1344 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1345 PTRACE_GETREGSET. */
1348 x86_linux_process_qsupported (const char *query
)
1350 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1351 with "i386" in qSupported query, it supports x86 XML target
1354 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1356 char *copy
= xstrdup (query
+ 13);
1359 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1361 if (strcmp (p
, "i386") == 0)
1371 x86_linux_update_xmltarget ();
1374 /* Common for x86/x86-64. */
1376 static struct regsets_info x86_regsets_info
=
1378 x86_regsets
, /* regsets */
1379 0, /* num_regsets */
1380 NULL
, /* disabled_regsets */
1384 static struct regs_info amd64_linux_regs_info
=
1386 NULL
, /* regset_bitmap */
1387 NULL
, /* usrregs_info */
1391 static struct usrregs_info i386_linux_usrregs_info
=
1397 static struct regs_info i386_linux_regs_info
=
1399 NULL
, /* regset_bitmap */
1400 &i386_linux_usrregs_info
,
1404 const struct regs_info
*
1405 x86_linux_regs_info (void)
1408 if (is_64bit_tdesc ())
1409 return &amd64_linux_regs_info
;
1412 return &i386_linux_regs_info
;
1415 /* Initialize the target description for the architecture of the
1419 x86_arch_setup (void)
1421 current_process ()->tdesc
= x86_linux_read_description ();
/* This target supports tracepoints unconditionally.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1431 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1433 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hex byte values
   ("48 89 e6"), appending each byte to BUF.  Returns the number of
   bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: end of the opcode string.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1459 /* Build a jump pad that saves registers and calls a collection
1460 function. Writes a jump instruction to the jump pad to
1461 JJUMPAD_INSN. The caller is responsible to write it in at the
1462 tracepoint address. */
1465 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1466 CORE_ADDR collector
,
1469 CORE_ADDR
*jump_entry
,
1470 CORE_ADDR
*trampoline
,
1471 ULONGEST
*trampoline_size
,
1472 unsigned char *jjump_pad_insn
,
1473 ULONGEST
*jjump_pad_insn_size
,
1474 CORE_ADDR
*adjusted_insn_addr
,
1475 CORE_ADDR
*adjusted_insn_addr_end
,
1478 unsigned char buf
[40];
1482 CORE_ADDR buildaddr
= *jump_entry
;
1484 /* Build the jump pad. */
1486 /* First, do tracepoint data collection. Save registers. */
1488 /* Need to ensure stack pointer saved first. */
1489 buf
[i
++] = 0x54; /* push %rsp */
1490 buf
[i
++] = 0x55; /* push %rbp */
1491 buf
[i
++] = 0x57; /* push %rdi */
1492 buf
[i
++] = 0x56; /* push %rsi */
1493 buf
[i
++] = 0x52; /* push %rdx */
1494 buf
[i
++] = 0x51; /* push %rcx */
1495 buf
[i
++] = 0x53; /* push %rbx */
1496 buf
[i
++] = 0x50; /* push %rax */
1497 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1498 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1499 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1500 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1501 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1502 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1503 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1504 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1505 buf
[i
++] = 0x9c; /* pushfq */
1506 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1508 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1509 i
+= sizeof (unsigned long);
1510 buf
[i
++] = 0x57; /* push %rdi */
1511 append_insns (&buildaddr
, i
, buf
);
1513 /* Stack space for the collecting_t object. */
1515 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1516 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1517 memcpy (buf
+ i
, &tpoint
, 8);
1519 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1520 i
+= push_opcode (&buf
[i
],
1521 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1522 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1523 append_insns (&buildaddr
, i
, buf
);
1527 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1528 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1530 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1531 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1532 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1533 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1534 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1535 append_insns (&buildaddr
, i
, buf
);
1537 /* Set up the gdb_collect call. */
1538 /* At this point, (stack pointer + 0x18) is the base of our saved
1542 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1543 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1545 /* tpoint address may be 64-bit wide. */
1546 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1547 memcpy (buf
+ i
, &tpoint
, 8);
1549 append_insns (&buildaddr
, i
, buf
);
1551 /* The collector function being in the shared library, may be
1552 >31-bits away off the jump pad. */
1554 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1555 memcpy (buf
+ i
, &collector
, 8);
1557 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1558 append_insns (&buildaddr
, i
, buf
);
1560 /* Clear the spin-lock. */
1562 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1563 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1564 memcpy (buf
+ i
, &lockaddr
, 8);
1566 append_insns (&buildaddr
, i
, buf
);
1568 /* Remove stack that had been used for the collect_t object. */
1570 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1571 append_insns (&buildaddr
, i
, buf
);
1573 /* Restore register state. */
1575 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1579 buf
[i
++] = 0x9d; /* popfq */
1580 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1581 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1582 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1583 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1584 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1585 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1586 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1587 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1588 buf
[i
++] = 0x58; /* pop %rax */
1589 buf
[i
++] = 0x5b; /* pop %rbx */
1590 buf
[i
++] = 0x59; /* pop %rcx */
1591 buf
[i
++] = 0x5a; /* pop %rdx */
1592 buf
[i
++] = 0x5e; /* pop %rsi */
1593 buf
[i
++] = 0x5f; /* pop %rdi */
1594 buf
[i
++] = 0x5d; /* pop %rbp */
1595 buf
[i
++] = 0x5c; /* pop %rsp */
1596 append_insns (&buildaddr
, i
, buf
);
1598 /* Now, adjust the original instruction to execute in the jump
1600 *adjusted_insn_addr
= buildaddr
;
1601 relocate_instruction (&buildaddr
, tpaddr
);
1602 *adjusted_insn_addr_end
= buildaddr
;
1604 /* Finally, write a jump back to the program. */
1606 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1607 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1610 "E.Jump back from jump pad too far from tracepoint "
1611 "(offset 0x%" PRIx64
" > int32).", loffset
);
1615 offset
= (int) loffset
;
1616 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1617 memcpy (buf
+ 1, &offset
, 4);
1618 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1620 /* The jump pad is now built. Wire in a jump to our jump pad. This
1621 is always done last (by our caller actually), so that we can
1622 install fast tracepoints with threads running. This relies on
1623 the agent's atomic write support. */
1624 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1625 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1628 "E.Jump pad too far from tracepoint "
1629 "(offset 0x%" PRIx64
" > int32).", loffset
);
1633 offset
= (int) loffset
;
1635 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1636 memcpy (buf
+ 1, &offset
, 4);
1637 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1638 *jjump_pad_insn_size
= sizeof (jump_insn
);
1640 /* Return the end address of our pad. */
1641 *jump_entry
= buildaddr
;
1646 #endif /* __x86_64__ */
1648 /* Build a jump pad that saves registers and calls a collection
1649 function. Writes a jump instruction to the jump pad to
1650 JJUMPAD_INSN. The caller is responsible to write it in at the
1651 tracepoint address. */
1654 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1655 CORE_ADDR collector
,
1658 CORE_ADDR
*jump_entry
,
1659 CORE_ADDR
*trampoline
,
1660 ULONGEST
*trampoline_size
,
1661 unsigned char *jjump_pad_insn
,
1662 ULONGEST
*jjump_pad_insn_size
,
1663 CORE_ADDR
*adjusted_insn_addr
,
1664 CORE_ADDR
*adjusted_insn_addr_end
,
1667 unsigned char buf
[0x100];
1669 CORE_ADDR buildaddr
= *jump_entry
;
1671 /* Build the jump pad. */
1673 /* First, do tracepoint data collection. Save registers. */
1675 buf
[i
++] = 0x60; /* pushad */
1676 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1677 *((int *)(buf
+ i
)) = (int) tpaddr
;
1679 buf
[i
++] = 0x9c; /* pushf */
1680 buf
[i
++] = 0x1e; /* push %ds */
1681 buf
[i
++] = 0x06; /* push %es */
1682 buf
[i
++] = 0x0f; /* push %fs */
1684 buf
[i
++] = 0x0f; /* push %gs */
1686 buf
[i
++] = 0x16; /* push %ss */
1687 buf
[i
++] = 0x0e; /* push %cs */
1688 append_insns (&buildaddr
, i
, buf
);
1690 /* Stack space for the collecting_t object. */
1692 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1694 /* Build the object. */
1695 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1696 memcpy (buf
+ i
, &tpoint
, 4);
1698 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1700 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1701 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1702 append_insns (&buildaddr
, i
, buf
);
1704 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1705 If we cared for it, this could be using xchg alternatively. */
1708 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1709 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1711 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1713 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1714 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1715 append_insns (&buildaddr
, i
, buf
);
1718 /* Set up arguments to the gdb_collect call. */
1720 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1721 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1722 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1723 append_insns (&buildaddr
, i
, buf
);
1726 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1727 append_insns (&buildaddr
, i
, buf
);
1730 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1731 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1733 append_insns (&buildaddr
, i
, buf
);
1735 buf
[0] = 0xe8; /* call <reladdr> */
1736 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1737 memcpy (buf
+ 1, &offset
, 4);
1738 append_insns (&buildaddr
, 5, buf
);
1739 /* Clean up after the call. */
1740 buf
[0] = 0x83; /* add $0x8,%esp */
1743 append_insns (&buildaddr
, 3, buf
);
1746 /* Clear the spin-lock. This would need the LOCK prefix on older
1749 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1750 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1751 memcpy (buf
+ i
, &lockaddr
, 4);
1753 append_insns (&buildaddr
, i
, buf
);
1756 /* Remove stack that had been used for the collect_t object. */
1758 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1759 append_insns (&buildaddr
, i
, buf
);
1762 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1765 buf
[i
++] = 0x17; /* pop %ss */
1766 buf
[i
++] = 0x0f; /* pop %gs */
1768 buf
[i
++] = 0x0f; /* pop %fs */
1770 buf
[i
++] = 0x07; /* pop %es */
1771 buf
[i
++] = 0x1f; /* pop %ds */
1772 buf
[i
++] = 0x9d; /* popf */
1773 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1776 buf
[i
++] = 0x61; /* popad */
1777 append_insns (&buildaddr
, i
, buf
);
1779 /* Now, adjust the original instruction to execute in the jump
1781 *adjusted_insn_addr
= buildaddr
;
1782 relocate_instruction (&buildaddr
, tpaddr
);
1783 *adjusted_insn_addr_end
= buildaddr
;
1785 /* Write the jump back to the program. */
1786 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1787 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1788 memcpy (buf
+ 1, &offset
, 4);
1789 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1791 /* The jump pad is now built. Wire in a jump to our jump pad. This
1792 is always done last (by our caller actually), so that we can
1793 install fast tracepoints with threads running. This relies on
1794 the agent's atomic write support. */
1797 /* Create a trampoline. */
1798 *trampoline_size
= sizeof (jump_insn
);
1799 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1801 /* No trampoline space available. */
1803 "E.Cannot allocate trampoline space needed for fast "
1804 "tracepoints on 4-byte instructions.");
1808 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1809 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1810 memcpy (buf
+ 1, &offset
, 4);
1811 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1813 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1814 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1815 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1816 memcpy (buf
+ 2, &offset
, 2);
1817 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1818 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1822 /* Else use a 32-bit relative jump instruction. */
1823 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1824 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1825 memcpy (buf
+ 1, &offset
, 4);
1826 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1827 *jjump_pad_insn_size
= sizeof (jump_insn
);
1830 /* Return the end address of our pad. */
1831 *jump_entry
= buildaddr
;
1837 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1838 CORE_ADDR collector
,
1841 CORE_ADDR
*jump_entry
,
1842 CORE_ADDR
*trampoline
,
1843 ULONGEST
*trampoline_size
,
1844 unsigned char *jjump_pad_insn
,
1845 ULONGEST
*jjump_pad_insn_size
,
1846 CORE_ADDR
*adjusted_insn_addr
,
1847 CORE_ADDR
*adjusted_insn_addr_end
,
1851 if (is_64bit_tdesc ())
1852 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1853 collector
, lockaddr
,
1854 orig_size
, jump_entry
,
1855 trampoline
, trampoline_size
,
1857 jjump_pad_insn_size
,
1859 adjusted_insn_addr_end
,
1863 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1864 collector
, lockaddr
,
1865 orig_size
, jump_entry
,
1866 trampoline
, trampoline_size
,
1868 jjump_pad_insn_size
,
1870 adjusted_insn_addr_end
,
1874 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1878 x86_get_min_fast_tracepoint_insn_len (void)
1880 static int warned_about_fast_tracepoints
= 0;
1883 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1884 used for fast tracepoints. */
1885 if (is_64bit_tdesc ())
1889 if (agent_loaded_p ())
1891 char errbuf
[IPA_BUFSIZ
];
1895 /* On x86, if trampolines are available, then 4-byte jump instructions
1896 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1897 with a 4-byte offset are used instead. */
1898 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1902 /* GDB has no channel to explain to user why a shorter fast
1903 tracepoint is not possible, but at least make GDBserver
1904 mention that something has gone awry. */
1905 if (!warned_about_fast_tracepoints
)
1907 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1908 warned_about_fast_tracepoints
= 1;
1915 /* Indicate that the minimum length is currently unknown since the IPA
1916 has not loaded yet. */
1922 add_insns (unsigned char *start
, int len
)
1924 CORE_ADDR buildaddr
= current_insn_ptr
;
1927 debug_printf ("Adding %d bytes of insn at %s\n",
1928 len
, paddress (buildaddr
));
1930 append_insns (&buildaddr
, len
, start
);
1931 current_insn_ptr
= buildaddr
;
1934 /* Our general strategy for emitting code is to avoid specifying raw
1935 bytes whenever possible, and instead copy a block of inline asm
1936 that is embedded in the function. This is a little messy, because
1937 we need to keep the compiler from discarding what looks like dead
1938 code, plus suppress various warnings. */
/* Copy the inline-asm template named NAME (bracketed by the linker
   symbols start_NAME/end_NAME emitted by the asm below) into the
   compiled bytecode buffer.  The jmp over the template keeps it from
   being executed in place; the asm statement itself keeps the
   compiler from discarding what looks like dead code.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit templates in a 64-bit build: switch the assembler to .code32
   around the template and back afterwards.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1975 amd64_emit_prologue (void)
1977 EMIT_ASM (amd64_prologue
,
1979 "movq %rsp,%rbp\n\t"
1980 "sub $0x20,%rsp\n\t"
1981 "movq %rdi,-8(%rbp)\n\t"
1982 "movq %rsi,-16(%rbp)");
1987 amd64_emit_epilogue (void)
1989 EMIT_ASM (amd64_epilogue
,
1990 "movq -16(%rbp),%rdi\n\t"
1991 "movq %rax,(%rdi)\n\t"
1998 amd64_emit_add (void)
2000 EMIT_ASM (amd64_add
,
2001 "add (%rsp),%rax\n\t"
2002 "lea 0x8(%rsp),%rsp");
2006 amd64_emit_sub (void)
2008 EMIT_ASM (amd64_sub
,
2009 "sub %rax,(%rsp)\n\t"
2014 amd64_emit_mul (void)
2020 amd64_emit_lsh (void)
2026 amd64_emit_rsh_signed (void)
2032 amd64_emit_rsh_unsigned (void)
2038 amd64_emit_ext (int arg
)
2043 EMIT_ASM (amd64_ext_8
,
2049 EMIT_ASM (amd64_ext_16
,
2054 EMIT_ASM (amd64_ext_32
,
2063 amd64_emit_log_not (void)
2065 EMIT_ASM (amd64_log_not
,
2066 "test %rax,%rax\n\t"
2072 amd64_emit_bit_and (void)
2074 EMIT_ASM (amd64_and
,
2075 "and (%rsp),%rax\n\t"
2076 "lea 0x8(%rsp),%rsp");
2080 amd64_emit_bit_or (void)
2083 "or (%rsp),%rax\n\t"
2084 "lea 0x8(%rsp),%rsp");
2088 amd64_emit_bit_xor (void)
2090 EMIT_ASM (amd64_xor
,
2091 "xor (%rsp),%rax\n\t"
2092 "lea 0x8(%rsp),%rsp");
2096 amd64_emit_bit_not (void)
2098 EMIT_ASM (amd64_bit_not
,
2099 "xorq $0xffffffffffffffff,%rax");
2103 amd64_emit_equal (void)
2105 EMIT_ASM (amd64_equal
,
2106 "cmp %rax,(%rsp)\n\t"
2107 "je .Lamd64_equal_true\n\t"
2109 "jmp .Lamd64_equal_end\n\t"
2110 ".Lamd64_equal_true:\n\t"
2112 ".Lamd64_equal_end:\n\t"
2113 "lea 0x8(%rsp),%rsp");
2117 amd64_emit_less_signed (void)
2119 EMIT_ASM (amd64_less_signed
,
2120 "cmp %rax,(%rsp)\n\t"
2121 "jl .Lamd64_less_signed_true\n\t"
2123 "jmp .Lamd64_less_signed_end\n\t"
2124 ".Lamd64_less_signed_true:\n\t"
2126 ".Lamd64_less_signed_end:\n\t"
2127 "lea 0x8(%rsp),%rsp");
2131 amd64_emit_less_unsigned (void)
2133 EMIT_ASM (amd64_less_unsigned
,
2134 "cmp %rax,(%rsp)\n\t"
2135 "jb .Lamd64_less_unsigned_true\n\t"
2137 "jmp .Lamd64_less_unsigned_end\n\t"
2138 ".Lamd64_less_unsigned_true:\n\t"
2140 ".Lamd64_less_unsigned_end:\n\t"
2141 "lea 0x8(%rsp),%rsp");
2145 amd64_emit_ref (int size
)
2150 EMIT_ASM (amd64_ref1
,
2154 EMIT_ASM (amd64_ref2
,
2158 EMIT_ASM (amd64_ref4
,
2159 "movl (%rax),%eax");
2162 EMIT_ASM (amd64_ref8
,
2163 "movq (%rax),%rax");
2169 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2171 EMIT_ASM (amd64_if_goto
,
2175 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2183 amd64_emit_goto (int *offset_p
, int *size_p
)
2185 EMIT_ASM (amd64_goto
,
2186 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2194 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2196 int diff
= (to
- (from
+ size
));
2197 unsigned char buf
[sizeof (int)];
2205 memcpy (buf
, &diff
, sizeof (int));
2206 write_inferior_memory (from
, buf
, sizeof (int));
2210 amd64_emit_const (LONGEST num
)
2212 unsigned char buf
[16];
2214 CORE_ADDR buildaddr
= current_insn_ptr
;
2217 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2218 memcpy (&buf
[i
], &num
, sizeof (num
));
2220 append_insns (&buildaddr
, i
, buf
);
2221 current_insn_ptr
= buildaddr
;
2225 amd64_emit_call (CORE_ADDR fn
)
2227 unsigned char buf
[16];
2229 CORE_ADDR buildaddr
;
2232 /* The destination function being in the shared library, may be
2233 >31-bits away off the compiled code pad. */
2235 buildaddr
= current_insn_ptr
;
2237 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2241 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2243 /* Offset is too large for a call. Use callq, but that requires
2244 a register, so avoid it if possible. Use r10, since it is
2245 call-clobbered, we don't have to push/pop it. */
2246 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2248 memcpy (buf
+ i
, &fn
, 8);
2250 buf
[i
++] = 0xff; /* callq *%r10 */
2255 int offset32
= offset64
; /* we know we can't overflow here. */
2256 memcpy (buf
+ i
, &offset32
, 4);
2260 append_insns (&buildaddr
, i
, buf
);
2261 current_insn_ptr
= buildaddr
;
2265 amd64_emit_reg (int reg
)
2267 unsigned char buf
[16];
2269 CORE_ADDR buildaddr
;
2271 /* Assume raw_regs is still in %rdi. */
2272 buildaddr
= current_insn_ptr
;
2274 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2275 memcpy (&buf
[i
], ®
, sizeof (reg
));
2277 append_insns (&buildaddr
, i
, buf
);
2278 current_insn_ptr
= buildaddr
;
2279 amd64_emit_call (get_raw_reg_func_addr ());
2283 amd64_emit_pop (void)
2285 EMIT_ASM (amd64_pop
,
2290 amd64_emit_stack_flush (void)
2292 EMIT_ASM (amd64_stack_flush
,
2297 amd64_emit_zero_ext (int arg
)
2302 EMIT_ASM (amd64_zero_ext_8
,
2306 EMIT_ASM (amd64_zero_ext_16
,
2307 "and $0xffff,%rax");
2310 EMIT_ASM (amd64_zero_ext_32
,
2311 "mov $0xffffffff,%rcx\n\t"
2320 amd64_emit_swap (void)
2322 EMIT_ASM (amd64_swap
,
2329 amd64_emit_stack_adjust (int n
)
2331 unsigned char buf
[16];
2333 CORE_ADDR buildaddr
= current_insn_ptr
;
2336 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2340 /* This only handles adjustments up to 16, but we don't expect any more. */
2342 append_insns (&buildaddr
, i
, buf
);
2343 current_insn_ptr
= buildaddr
;
2346 /* FN's prototype is `LONGEST(*fn)(int)'. */
2349 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2351 unsigned char buf
[16];
2353 CORE_ADDR buildaddr
;
2355 buildaddr
= current_insn_ptr
;
2357 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2358 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2360 append_insns (&buildaddr
, i
, buf
);
2361 current_insn_ptr
= buildaddr
;
2362 amd64_emit_call (fn
);
2365 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2368 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2370 unsigned char buf
[16];
2372 CORE_ADDR buildaddr
;
2374 buildaddr
= current_insn_ptr
;
2376 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2377 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2379 append_insns (&buildaddr
, i
, buf
);
2380 current_insn_ptr
= buildaddr
;
2381 EMIT_ASM (amd64_void_call_2_a
,
2382 /* Save away a copy of the stack top. */
2384 /* Also pass top as the second argument. */
2386 amd64_emit_call (fn
);
2387 EMIT_ASM (amd64_void_call_2_b
,
2388 /* Restore the stack top, %rax may have been trashed. */
2393 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2396 "cmp %rax,(%rsp)\n\t"
2397 "jne .Lamd64_eq_fallthru\n\t"
2398 "lea 0x8(%rsp),%rsp\n\t"
2400 /* jmp, but don't trust the assembler to choose the right jump */
2401 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2402 ".Lamd64_eq_fallthru:\n\t"
2403 "lea 0x8(%rsp),%rsp\n\t"
2413 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2416 "cmp %rax,(%rsp)\n\t"
2417 "je .Lamd64_ne_fallthru\n\t"
2418 "lea 0x8(%rsp),%rsp\n\t"
2420 /* jmp, but don't trust the assembler to choose the right jump */
2421 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2422 ".Lamd64_ne_fallthru:\n\t"
2423 "lea 0x8(%rsp),%rsp\n\t"
2433 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2436 "cmp %rax,(%rsp)\n\t"
2437 "jnl .Lamd64_lt_fallthru\n\t"
2438 "lea 0x8(%rsp),%rsp\n\t"
2440 /* jmp, but don't trust the assembler to choose the right jump */
2441 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2442 ".Lamd64_lt_fallthru:\n\t"
2443 "lea 0x8(%rsp),%rsp\n\t"
2453 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2456 "cmp %rax,(%rsp)\n\t"
2457 "jnle .Lamd64_le_fallthru\n\t"
2458 "lea 0x8(%rsp),%rsp\n\t"
2460 /* jmp, but don't trust the assembler to choose the right jump */
2461 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2462 ".Lamd64_le_fallthru:\n\t"
2463 "lea 0x8(%rsp),%rsp\n\t"
2473 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2476 "cmp %rax,(%rsp)\n\t"
2477 "jng .Lamd64_gt_fallthru\n\t"
2478 "lea 0x8(%rsp),%rsp\n\t"
2480 /* jmp, but don't trust the assembler to choose the right jump */
2481 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2482 ".Lamd64_gt_fallthru:\n\t"
2483 "lea 0x8(%rsp),%rsp\n\t"
2493 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2496 "cmp %rax,(%rsp)\n\t"
2497 "jnge .Lamd64_ge_fallthru\n\t"
2498 ".Lamd64_ge_jump:\n\t"
2499 "lea 0x8(%rsp),%rsp\n\t"
2501 /* jmp, but don't trust the assembler to choose the right jump */
2502 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2503 ".Lamd64_ge_fallthru:\n\t"
2504 "lea 0x8(%rsp),%rsp\n\t"
2513 struct emit_ops amd64_emit_ops
=
2515 amd64_emit_prologue
,
2516 amd64_emit_epilogue
,
2521 amd64_emit_rsh_signed
,
2522 amd64_emit_rsh_unsigned
,
2530 amd64_emit_less_signed
,
2531 amd64_emit_less_unsigned
,
2535 amd64_write_goto_address
,
2540 amd64_emit_stack_flush
,
2541 amd64_emit_zero_ext
,
2543 amd64_emit_stack_adjust
,
2544 amd64_emit_int_call_1
,
2545 amd64_emit_void_call_2
,
2554 #endif /* __x86_64__ */
2557 i386_emit_prologue (void)
2559 EMIT_ASM32 (i386_prologue
,
2563 /* At this point, the raw regs base address is at 8(%ebp), and the
2564 value pointer is at 12(%ebp). */
2568 i386_emit_epilogue (void)
2570 EMIT_ASM32 (i386_epilogue
,
2571 "mov 12(%ebp),%ecx\n\t"
2572 "mov %eax,(%ecx)\n\t"
2573 "mov %ebx,0x4(%ecx)\n\t"
2581 i386_emit_add (void)
2583 EMIT_ASM32 (i386_add
,
2584 "add (%esp),%eax\n\t"
2585 "adc 0x4(%esp),%ebx\n\t"
2586 "lea 0x8(%esp),%esp");
2590 i386_emit_sub (void)
2592 EMIT_ASM32 (i386_sub
,
2593 "subl %eax,(%esp)\n\t"
2594 "sbbl %ebx,4(%esp)\n\t"
2600 i386_emit_mul (void)
2606 i386_emit_lsh (void)
2612 i386_emit_rsh_signed (void)
2618 i386_emit_rsh_unsigned (void)
2624 i386_emit_ext (int arg
)
2629 EMIT_ASM32 (i386_ext_8
,
2632 "movl %eax,%ebx\n\t"
2636 EMIT_ASM32 (i386_ext_16
,
2638 "movl %eax,%ebx\n\t"
2642 EMIT_ASM32 (i386_ext_32
,
2643 "movl %eax,%ebx\n\t"
2652 i386_emit_log_not (void)
2654 EMIT_ASM32 (i386_log_not
,
2656 "test %eax,%eax\n\t"
2663 i386_emit_bit_and (void)
2665 EMIT_ASM32 (i386_and
,
2666 "and (%esp),%eax\n\t"
2667 "and 0x4(%esp),%ebx\n\t"
2668 "lea 0x8(%esp),%esp");
2672 i386_emit_bit_or (void)
2674 EMIT_ASM32 (i386_or
,
2675 "or (%esp),%eax\n\t"
2676 "or 0x4(%esp),%ebx\n\t"
2677 "lea 0x8(%esp),%esp");
2681 i386_emit_bit_xor (void)
2683 EMIT_ASM32 (i386_xor
,
2684 "xor (%esp),%eax\n\t"
2685 "xor 0x4(%esp),%ebx\n\t"
2686 "lea 0x8(%esp),%esp");
2690 i386_emit_bit_not (void)
2692 EMIT_ASM32 (i386_bit_not
,
2693 "xor $0xffffffff,%eax\n\t"
2694 "xor $0xffffffff,%ebx\n\t");
2698 i386_emit_equal (void)
2700 EMIT_ASM32 (i386_equal
,
2701 "cmpl %ebx,4(%esp)\n\t"
2702 "jne .Li386_equal_false\n\t"
2703 "cmpl %eax,(%esp)\n\t"
2704 "je .Li386_equal_true\n\t"
2705 ".Li386_equal_false:\n\t"
2707 "jmp .Li386_equal_end\n\t"
2708 ".Li386_equal_true:\n\t"
2710 ".Li386_equal_end:\n\t"
2712 "lea 0x8(%esp),%esp");
2716 i386_emit_less_signed (void)
2718 EMIT_ASM32 (i386_less_signed
,
2719 "cmpl %ebx,4(%esp)\n\t"
2720 "jl .Li386_less_signed_true\n\t"
2721 "jne .Li386_less_signed_false\n\t"
2722 "cmpl %eax,(%esp)\n\t"
2723 "jl .Li386_less_signed_true\n\t"
2724 ".Li386_less_signed_false:\n\t"
2726 "jmp .Li386_less_signed_end\n\t"
2727 ".Li386_less_signed_true:\n\t"
2729 ".Li386_less_signed_end:\n\t"
2731 "lea 0x8(%esp),%esp");
2735 i386_emit_less_unsigned (void)
2737 EMIT_ASM32 (i386_less_unsigned
,
2738 "cmpl %ebx,4(%esp)\n\t"
2739 "jb .Li386_less_unsigned_true\n\t"
2740 "jne .Li386_less_unsigned_false\n\t"
2741 "cmpl %eax,(%esp)\n\t"
2742 "jb .Li386_less_unsigned_true\n\t"
2743 ".Li386_less_unsigned_false:\n\t"
2745 "jmp .Li386_less_unsigned_end\n\t"
2746 ".Li386_less_unsigned_true:\n\t"
2748 ".Li386_less_unsigned_end:\n\t"
2750 "lea 0x8(%esp),%esp");
2754 i386_emit_ref (int size
)
2759 EMIT_ASM32 (i386_ref1
,
2763 EMIT_ASM32 (i386_ref2
,
2767 EMIT_ASM32 (i386_ref4
,
2768 "movl (%eax),%eax");
2771 EMIT_ASM32 (i386_ref8
,
2772 "movl 4(%eax),%ebx\n\t"
2773 "movl (%eax),%eax");
2779 i386_emit_if_goto (int *offset_p
, int *size_p
)
2781 EMIT_ASM32 (i386_if_goto
,
2787 /* Don't trust the assembler to choose the right jump */
2788 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2791 *offset_p
= 11; /* be sure that this matches the sequence above */
2797 i386_emit_goto (int *offset_p
, int *size_p
)
2799 EMIT_ASM32 (i386_goto
,
2800 /* Don't trust the assembler to choose the right jump */
2801 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2809 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2811 int diff
= (to
- (from
+ size
));
2812 unsigned char buf
[sizeof (int)];
2814 /* We're only doing 4-byte sizes at the moment. */
2821 memcpy (buf
, &diff
, sizeof (int));
2822 write_inferior_memory (from
, buf
, sizeof (int));
2826 i386_emit_const (LONGEST num
)
2828 unsigned char buf
[16];
2830 CORE_ADDR buildaddr
= current_insn_ptr
;
2833 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2834 lo
= num
& 0xffffffff;
2835 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2837 hi
= ((num
>> 32) & 0xffffffff);
2840 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2841 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2846 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2848 append_insns (&buildaddr
, i
, buf
);
2849 current_insn_ptr
= buildaddr
;
2853 i386_emit_call (CORE_ADDR fn
)
2855 unsigned char buf
[16];
2857 CORE_ADDR buildaddr
;
2859 buildaddr
= current_insn_ptr
;
2861 buf
[i
++] = 0xe8; /* call <reladdr> */
2862 offset
= ((int) fn
) - (buildaddr
+ 5);
2863 memcpy (buf
+ 1, &offset
, 4);
2864 append_insns (&buildaddr
, 5, buf
);
2865 current_insn_ptr
= buildaddr
;
2869 i386_emit_reg (int reg
)
2871 unsigned char buf
[16];
2873 CORE_ADDR buildaddr
;
2875 EMIT_ASM32 (i386_reg_a
,
2877 buildaddr
= current_insn_ptr
;
2879 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2880 memcpy (&buf
[i
], ®
, sizeof (reg
));
2882 append_insns (&buildaddr
, i
, buf
);
2883 current_insn_ptr
= buildaddr
;
2884 EMIT_ASM32 (i386_reg_b
,
2885 "mov %eax,4(%esp)\n\t"
2886 "mov 8(%ebp),%eax\n\t"
2888 i386_emit_call (get_raw_reg_func_addr ());
2889 EMIT_ASM32 (i386_reg_c
,
2891 "lea 0x8(%esp),%esp");
2895 i386_emit_pop (void)
2897 EMIT_ASM32 (i386_pop
,
2903 i386_emit_stack_flush (void)
2905 EMIT_ASM32 (i386_stack_flush
,
2911 i386_emit_zero_ext (int arg
)
2916 EMIT_ASM32 (i386_zero_ext_8
,
2917 "and $0xff,%eax\n\t"
2921 EMIT_ASM32 (i386_zero_ext_16
,
2922 "and $0xffff,%eax\n\t"
2926 EMIT_ASM32 (i386_zero_ext_32
,
2935 i386_emit_swap (void)
2937 EMIT_ASM32 (i386_swap
,
2947 i386_emit_stack_adjust (int n
)
2949 unsigned char buf
[16];
2951 CORE_ADDR buildaddr
= current_insn_ptr
;
2954 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2958 append_insns (&buildaddr
, i
, buf
);
2959 current_insn_ptr
= buildaddr
;
2962 /* FN's prototype is `LONGEST(*fn)(int)'. */
2965 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2967 unsigned char buf
[16];
2969 CORE_ADDR buildaddr
;
2971 EMIT_ASM32 (i386_int_call_1_a
,
2972 /* Reserve a bit of stack space. */
2974 /* Put the one argument on the stack. */
2975 buildaddr
= current_insn_ptr
;
2977 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2980 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2982 append_insns (&buildaddr
, i
, buf
);
2983 current_insn_ptr
= buildaddr
;
2984 i386_emit_call (fn
);
2985 EMIT_ASM32 (i386_int_call_1_c
,
2987 "lea 0x8(%esp),%esp");
2990 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2993 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2995 unsigned char buf
[16];
2997 CORE_ADDR buildaddr
;
2999 EMIT_ASM32 (i386_void_call_2_a
,
3000 /* Preserve %eax only; we don't have to worry about %ebx. */
3002 /* Reserve a bit of stack space for arguments. */
3003 "sub $0x10,%esp\n\t"
3004 /* Copy "top" to the second argument position. (Note that
3005 we can't assume function won't scribble on its
3006 arguments, so don't try to restore from this.) */
3007 "mov %eax,4(%esp)\n\t"
3008 "mov %ebx,8(%esp)");
3009 /* Put the first argument on the stack. */
3010 buildaddr
= current_insn_ptr
;
3012 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3015 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3017 append_insns (&buildaddr
, i
, buf
);
3018 current_insn_ptr
= buildaddr
;
3019 i386_emit_call (fn
);
3020 EMIT_ASM32 (i386_void_call_2_b
,
3021 "lea 0x10(%esp),%esp\n\t"
3022 /* Restore original stack top. */
3028 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3031 /* Check low half first, more likely to be decider */
3032 "cmpl %eax,(%esp)\n\t"
3033 "jne .Leq_fallthru\n\t"
3034 "cmpl %ebx,4(%esp)\n\t"
3035 "jne .Leq_fallthru\n\t"
3036 "lea 0x8(%esp),%esp\n\t"
3039 /* jmp, but don't trust the assembler to choose the right jump */
3040 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3041 ".Leq_fallthru:\n\t"
3042 "lea 0x8(%esp),%esp\n\t"
3053 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3056 /* Check low half first, more likely to be decider */
3057 "cmpl %eax,(%esp)\n\t"
3059 "cmpl %ebx,4(%esp)\n\t"
3060 "je .Lne_fallthru\n\t"
3062 "lea 0x8(%esp),%esp\n\t"
3065 /* jmp, but don't trust the assembler to choose the right jump */
3066 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3067 ".Lne_fallthru:\n\t"
3068 "lea 0x8(%esp),%esp\n\t"
3079 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3082 "cmpl %ebx,4(%esp)\n\t"
3084 "jne .Llt_fallthru\n\t"
3085 "cmpl %eax,(%esp)\n\t"
3086 "jnl .Llt_fallthru\n\t"
3088 "lea 0x8(%esp),%esp\n\t"
3091 /* jmp, but don't trust the assembler to choose the right jump */
3092 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3093 ".Llt_fallthru:\n\t"
3094 "lea 0x8(%esp),%esp\n\t"
3105 i386_emit_le_goto (int *offset_p
, int *size_p
)
3108 "cmpl %ebx,4(%esp)\n\t"
3110 "jne .Lle_fallthru\n\t"
3111 "cmpl %eax,(%esp)\n\t"
3112 "jnle .Lle_fallthru\n\t"
3114 "lea 0x8(%esp),%esp\n\t"
3117 /* jmp, but don't trust the assembler to choose the right jump */
3118 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3119 ".Lle_fallthru:\n\t"
3120 "lea 0x8(%esp),%esp\n\t"
3131 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3134 "cmpl %ebx,4(%esp)\n\t"
3136 "jne .Lgt_fallthru\n\t"
3137 "cmpl %eax,(%esp)\n\t"
3138 "jng .Lgt_fallthru\n\t"
3140 "lea 0x8(%esp),%esp\n\t"
3143 /* jmp, but don't trust the assembler to choose the right jump */
3144 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3145 ".Lgt_fallthru:\n\t"
3146 "lea 0x8(%esp),%esp\n\t"
3157 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3160 "cmpl %ebx,4(%esp)\n\t"
3162 "jne .Lge_fallthru\n\t"
3163 "cmpl %eax,(%esp)\n\t"
3164 "jnge .Lge_fallthru\n\t"
3166 "lea 0x8(%esp),%esp\n\t"
3169 /* jmp, but don't trust the assembler to choose the right jump */
3170 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3171 ".Lge_fallthru:\n\t"
3172 "lea 0x8(%esp),%esp\n\t"
3182 struct emit_ops i386_emit_ops
=
3190 i386_emit_rsh_signed
,
3191 i386_emit_rsh_unsigned
,
3199 i386_emit_less_signed
,
3200 i386_emit_less_unsigned
,
3204 i386_write_goto_address
,
3209 i386_emit_stack_flush
,
3212 i386_emit_stack_adjust
,
3213 i386_emit_int_call_1
,
3214 i386_emit_void_call_2
,
3224 static struct emit_ops
*
3228 if (is_64bit_tdesc ())
3229 return &amd64_emit_ops
;
3232 return &i386_emit_ops
;
/* x86/x86-64 always supports the vCont range-stepping extension.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3241 /* This is initialized assuming an amd64 target.
3242 x86_arch_setup will correct it for i386 or amd64 targets. */
3244 struct linux_target_ops the_low_target
=
3247 x86_linux_regs_info
,
3248 x86_cannot_fetch_register
,
3249 x86_cannot_store_register
,
3250 NULL
, /* fetch_register */
3258 x86_supports_z_point_type
,
3261 x86_stopped_by_watchpoint
,
3262 x86_stopped_data_address
,
3263 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3264 native i386 case (no registers smaller than an xfer unit), and are not
3265 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3268 /* need to fix up i386 siginfo if host is amd64 */
3270 x86_linux_new_process
,
3271 x86_linux_new_thread
,
3272 x86_linux_prepare_to_resume
,
3273 x86_linux_process_qsupported
,
3274 x86_supports_tracepoints
,
3275 x86_get_thread_area
,
3276 x86_install_fast_tracepoint_jump_pad
,
3278 x86_get_min_fast_tracepoint_insn_len
,
3279 x86_supports_range_stepping
,
3283 initialize_low_arch (void)
3285 /* Initialize the Linux target descriptions. */
3287 init_registers_amd64_linux ();
3288 init_registers_amd64_avx_linux ();
3289 init_registers_amd64_avx512_linux ();
3290 init_registers_amd64_mpx_linux ();
3292 init_registers_x32_linux ();
3293 init_registers_x32_avx_linux ();
3294 init_registers_x32_avx512_linux ();
3296 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3297 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3298 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3300 init_registers_i386_linux ();
3301 init_registers_i386_mmx_linux ();
3302 init_registers_i386_avx_linux ();
3303 init_registers_i386_avx512_linux ();
3304 init_registers_i386_mpx_linux ();
3306 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3307 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3308 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3310 initialize_regsets_info (&x86_regsets_info
);