1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
42 /* Defined in auto-generated file amd64-linux.c. */
43 void init_registers_amd64_linux (void);
44 extern const struct target_desc
*tdesc_amd64_linux
;
46 /* Defined in auto-generated file amd64-avx-linux.c. */
47 void init_registers_amd64_avx_linux (void);
48 extern const struct target_desc
*tdesc_amd64_avx_linux
;
50 /* Defined in auto-generated file amd64-avx512-linux.c. */
51 void init_registers_amd64_avx512_linux (void);
52 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
54 /* Defined in auto-generated file amd64-mpx-linux.c. */
55 void init_registers_amd64_mpx_linux (void);
56 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
58 /* Defined in auto-generated file x32-linux.c. */
59 void init_registers_x32_linux (void);
60 extern const struct target_desc
*tdesc_x32_linux
;
62 /* Defined in auto-generated file x32-avx-linux.c. */
63 void init_registers_x32_avx_linux (void);
64 extern const struct target_desc
*tdesc_x32_avx_linux
;
66 /* Defined in auto-generated file x32-avx512-linux.c. */
67 void init_registers_x32_avx512_linux (void);
68 extern const struct target_desc
*tdesc_x32_avx512_linux
;
72 /* Defined in auto-generated file i386-linux.c. */
73 void init_registers_i386_linux (void);
74 extern const struct target_desc
*tdesc_i386_linux
;
76 /* Defined in auto-generated file i386-mmx-linux.c. */
77 void init_registers_i386_mmx_linux (void);
78 extern const struct target_desc
*tdesc_i386_mmx_linux
;
80 /* Defined in auto-generated file i386-avx-linux.c. */
81 void init_registers_i386_avx_linux (void);
82 extern const struct target_desc
*tdesc_i386_avx_linux
;
84 /* Defined in auto-generated file i386-avx512-linux.c. */
85 void init_registers_i386_avx512_linux (void);
86 extern const struct target_desc
*tdesc_i386_avx512_linux
;
88 /* Defined in auto-generated file i386-mpx-linux.c. */
89 void init_registers_i386_mpx_linux (void);
90 extern const struct target_desc
*tdesc_i386_mpx_linux
;
93 static struct target_desc
*tdesc_amd64_linux_no_xml
;
95 static struct target_desc
*tdesc_i386_linux_no_xml
;
/* Template for a 5-byte x86 relative jump (opcode 0xE9, `jmp rel32');
   the four zero bytes are the 32-bit displacement, patched in when the
   jump pad is emitted.  */
98 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
/* Template for a 4-byte jump: 0x66 operand-size prefix + 0xE9 gives
   `jmp rel16', used when a 16-bit displacement suffices.  */
99 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
101 /* Backward compatibility for gdb without XML support. */
103 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
104 <architecture>i386</architecture>\
105 <osabi>GNU/Linux</osabi>\
109 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
110 <architecture>i386:x86-64</architecture>\
111 <osabi>GNU/Linux</osabi>\
116 #include <sys/procfs.h>
117 #include <sys/ptrace.h>
/* Fallback definitions for ptrace request codes that older kernel or
   libc headers may not provide.  The values match the Linux UAPI
   definitions; they are only used when the headers do not already
   define them.  */
120 #ifndef PTRACE_GETREGSET
121 #define PTRACE_GETREGSET 0x4204
124 #ifndef PTRACE_SETREGSET
125 #define PTRACE_SETREGSET 0x4205
129 #ifndef PTRACE_GET_THREAD_AREA
130 #define PTRACE_GET_THREAD_AREA 25
133 /* This definition comes from prctl.h, but some kernels may not have it. */
134 #ifndef PTRACE_ARCH_PRCTL
135 #define PTRACE_ARCH_PRCTL 30
138 /* The following definitions come from prctl.h, but may be absent
139 for certain configurations. */
141 #define ARCH_SET_GS 0x1001
142 #define ARCH_SET_FS 0x1002
143 #define ARCH_GET_FS 0x1003
144 #define ARCH_GET_GS 0x1004
147 /* Per-process arch-specific data we want to keep. */
149 struct arch_process_info
151 struct x86_debug_reg_state debug_reg_state
;
154 /* Per-thread arch-specific data we want to keep. */
158 /* Non-zero if our copy differs from what's recorded in the thread. */
159 int debug_registers_changed
;
164 /* Mapping between the general-purpose registers in `struct user'
165 format and GDB's register array layout.
166 Note that the transfer layout uses 64-bit regs. */
167 static /*const*/ int i386_regmap
[] =
169 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
170 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
171 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
172 DS
* 8, ES
* 8, FS
* 8, GS
* 8
/* Number of entries in i386_regmap (standard array-length idiom;
   valid here because i386_regmap is a true array, not a pointer).  */
175 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
177 /* So code below doesn't have to care, i386 or amd64. */
178 #define ORIG_EAX ORIG_RAX
181 static const int x86_64_regmap
[] =
183 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
184 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
185 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
186 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
187 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
188 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
/* Number of GDB-visible register slots described by x86_64_regmap.  */
208 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
/* Count of registers actually present in `struct user' (everything up
   through GS); used below to size the zero-fill of the gregset buffer
   when the inferior is 32-bit.  */
209 #define X86_64_USER_REGS (GS + 1)
211 #else /* ! __x86_64__ */
213 /* Mapping between the general-purpose registers in `struct user'
214 format and GDB's register array layout. */
215 static /*const*/ int i386_regmap
[] =
217 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
218 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
219 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
220 DS
* 4, ES
* 4, FS
* 4, GS
* 4
223 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
231 /* Returns true if the current inferior belongs to a x86-64 process,
235 is_64bit_tdesc (void)
237 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
239 return register_size (regcache
->tdesc
, 0) == 8;
245 /* Called by libthread_db. */
248 ps_get_thread_area (const struct ps_prochandle
*ph
,
249 lwpid_t lwpid
, int idx
, void **base
)
252 int use_64bit
= is_64bit_tdesc ();
259 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
263 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
274 unsigned int desc
[4];
276 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
277 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
280 /* Ensure we properly extend the value to 64-bits for x86_64. */
281 *base
= (void *) (uintptr_t) desc
[1];
286 /* Get the thread area address. This is used to recognize which
287 thread is which when tracing with the in-process agent library. We
288 don't read anything from the address, and treat it as opaque; it's
289 the address itself that we assume is unique per-thread. */
292 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
295 int use_64bit
= is_64bit_tdesc ();
300 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
302 *addr
= (CORE_ADDR
) (uintptr_t) base
;
311 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
312 struct thread_info
*thr
= get_lwp_thread (lwp
);
313 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
314 unsigned int desc
[4];
316 const int reg_thread_area
= 3; /* bits to scale down register value. */
319 collect_register_by_name (regcache
, "gs", &gs
);
321 idx
= gs
>> reg_thread_area
;
323 if (ptrace (PTRACE_GET_THREAD_AREA
,
325 (void *) (long) idx
, (unsigned long) &desc
) < 0)
336 x86_cannot_store_register (int regno
)
339 if (is_64bit_tdesc ())
343 return regno
>= I386_NUM_REGS
;
347 x86_cannot_fetch_register (int regno
)
350 if (is_64bit_tdesc ())
354 return regno
>= I386_NUM_REGS
;
358 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
363 if (register_size (regcache
->tdesc
, 0) == 8)
365 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
366 if (x86_64_regmap
[i
] != -1)
367 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
371 /* 32-bit inferior registers need to be zero-extended.
372 Callers would read uninitialized memory otherwise. */
373 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
376 for (i
= 0; i
< I386_NUM_REGS
; i
++)
377 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
379 collect_register_by_name (regcache
, "orig_eax",
380 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
384 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
389 if (register_size (regcache
->tdesc
, 0) == 8)
391 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
392 if (x86_64_regmap
[i
] != -1)
393 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
398 for (i
= 0; i
< I386_NUM_REGS
; i
++)
399 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
401 supply_register_by_name (regcache
, "orig_eax",
402 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
406 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
409 i387_cache_to_fxsave (regcache
, buf
);
411 i387_cache_to_fsave (regcache
, buf
);
416 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
419 i387_fxsave_to_cache (regcache
, buf
);
421 i387_fsave_to_cache (regcache
, buf
);
428 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
430 i387_cache_to_fxsave (regcache
, buf
);
434 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
436 i387_fxsave_to_cache (regcache
, buf
);
442 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
444 i387_cache_to_xsave (regcache
, buf
);
448 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
450 i387_xsave_to_cache (regcache
, buf
);
453 /* ??? The non-biarch i386 case stores all the i387 regs twice.
454 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
455 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
456 doesn't work. It would be nice to avoid the duplication in the case
457 where it does work. Maybe the arch_setup routine could check whether
458 it works and update the supported regsets accordingly. */
460 static struct regset_info x86_regsets
[] =
462 #ifdef HAVE_PTRACE_GETREGS
463 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
465 x86_fill_gregset
, x86_store_gregset
},
466 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
467 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
469 # ifdef HAVE_PTRACE_GETFPXREGS
470 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
472 x86_fill_fpxregset
, x86_store_fpxregset
},
475 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
477 x86_fill_fpregset
, x86_store_fpregset
},
478 #endif /* HAVE_PTRACE_GETREGS */
479 { 0, 0, 0, -1, -1, NULL
, NULL
}
483 x86_get_pc (struct regcache
*regcache
)
485 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
490 collect_register_by_name (regcache
, "rip", &pc
);
491 return (CORE_ADDR
) pc
;
496 collect_register_by_name (regcache
, "eip", &pc
);
497 return (CORE_ADDR
) pc
;
502 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
504 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
508 unsigned long newpc
= pc
;
509 supply_register_by_name (regcache
, "rip", &newpc
);
513 unsigned int newpc
= pc
;
514 supply_register_by_name (regcache
, "eip", &newpc
);
/* Software breakpoint instruction: the one-byte INT3 opcode (0xCC),
   identical on i386 and amd64.  */
518 static const unsigned char x86_breakpoint
[] = { 0xCC };
519 #define x86_breakpoint_len 1
522 x86_breakpoint_at (CORE_ADDR pc
)
526 (*the_target
->read_memory
) (pc
, &c
, 1);
533 /* Support for debug registers. */
536 x86_linux_dr_get (ptid_t ptid
, int regnum
)
541 tid
= ptid_get_lwp (ptid
);
544 value
= ptrace (PTRACE_PEEKUSER
, tid
,
545 offsetof (struct user
, u_debugreg
[regnum
]), 0);
547 error ("Couldn't read debug register");
553 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
557 tid
= ptid_get_lwp (ptid
);
560 ptrace (PTRACE_POKEUSER
, tid
,
561 offsetof (struct user
, u_debugreg
[regnum
]), value
);
563 error ("Couldn't write debug register");
567 update_debug_registers_callback (struct inferior_list_entry
*entry
,
570 struct thread_info
*thr
= (struct thread_info
*) entry
;
571 struct lwp_info
*lwp
= get_thread_lwp (thr
);
572 int pid
= *(int *) pid_p
;
574 /* Only update the threads of this process. */
575 if (pid_of (thr
) == pid
)
577 /* The actual update is done later just before resuming the lwp,
578 we just mark that the registers need updating. */
579 lwp
->arch_private
->debug_registers_changed
= 1;
581 /* If the lwp isn't stopped, force it to momentarily pause, so
582 we can update its debug registers. */
584 linux_stop_lwp (lwp
);
590 /* Update the inferior's debug register REGNUM from STATE. */
593 x86_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
595 /* Only update the threads of this process. */
596 int pid
= pid_of (current_thread
);
598 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
600 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
603 /* Return the inferior's debug register REGNUM. */
606 x86_dr_low_get_addr (int regnum
)
608 ptid_t ptid
= ptid_of (current_thread
);
610 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
612 return x86_linux_dr_get (ptid
, regnum
);
615 /* Update the inferior's DR7 debug control register from STATE. */
618 x86_dr_low_set_control (unsigned long control
)
620 /* Only update the threads of this process. */
621 int pid
= pid_of (current_thread
);
623 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
626 /* Return the inferior's DR7 debug control register. */
629 x86_dr_low_get_control (void)
631 ptid_t ptid
= ptid_of (current_thread
);
633 return x86_linux_dr_get (ptid
, DR_CONTROL
);
636 /* Get the value of the DR6 debug status register from the inferior
637 and record it in STATE. */
640 x86_dr_low_get_status (void)
642 ptid_t ptid
= ptid_of (current_thread
);
644 return x86_linux_dr_get (ptid
, DR_STATUS
);
647 /* Low-level function vector. */
648 struct x86_dr_low_type x86_dr_low
=
650 x86_dr_low_set_control
,
653 x86_dr_low_get_status
,
654 x86_dr_low_get_control
,
658 /* Breakpoint/Watchpoint support. */
661 x86_supports_z_point_type (char z_type
)
667 case Z_PACKET_WRITE_WP
:
668 case Z_PACKET_ACCESS_WP
:
676 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
677 int size
, struct raw_breakpoint
*bp
)
679 struct process_info
*proc
= current_process ();
683 case raw_bkpt_type_sw
:
684 return insert_memory_breakpoint (bp
);
686 case raw_bkpt_type_hw
:
687 case raw_bkpt_type_write_wp
:
688 case raw_bkpt_type_access_wp
:
690 enum target_hw_bp_type hw_type
691 = raw_bkpt_type_to_target_hw_bp_type (type
);
692 struct x86_debug_reg_state
*state
693 = &proc
->priv
->arch_private
->debug_reg_state
;
695 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
705 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
706 int size
, struct raw_breakpoint
*bp
)
708 struct process_info
*proc
= current_process ();
712 case raw_bkpt_type_sw
:
713 return remove_memory_breakpoint (bp
);
715 case raw_bkpt_type_hw
:
716 case raw_bkpt_type_write_wp
:
717 case raw_bkpt_type_access_wp
:
719 enum target_hw_bp_type hw_type
720 = raw_bkpt_type_to_target_hw_bp_type (type
);
721 struct x86_debug_reg_state
*state
722 = &proc
->priv
->arch_private
->debug_reg_state
;
724 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
733 x86_stopped_by_watchpoint (void)
735 struct process_info
*proc
= current_process ();
736 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
740 x86_stopped_data_address (void)
742 struct process_info
*proc
= current_process ();
744 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
750 /* Called when a new process is created. */
752 static struct arch_process_info
*
753 x86_linux_new_process (void)
755 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
757 x86_low_init_dregs (&info
->debug_reg_state
);
762 /* Called when a new thread is detected. */
764 static struct arch_lwp_info
*
765 x86_linux_new_thread (void)
767 struct arch_lwp_info
*info
= XCNEW (struct arch_lwp_info
);
769 info
->debug_registers_changed
= 1;
774 /* Called when resuming a thread.
775 If the debug regs have changed, update the thread's copies. */
778 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
780 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
781 int clear_status
= 0;
783 if (lwp
->arch_private
->debug_registers_changed
)
786 int pid
= ptid_get_pid (ptid
);
787 struct process_info
*proc
= find_process_pid (pid
);
788 struct x86_debug_reg_state
*state
789 = &proc
->priv
->arch_private
->debug_reg_state
;
791 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
793 ALL_DEBUG_ADDRESS_REGISTERS (i
)
794 if (state
->dr_ref_count
[i
] > 0)
796 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
798 /* If we're setting a watchpoint, any change the inferior
799 had done itself to the debug registers needs to be
800 discarded, otherwise, x86_dr_stopped_data_address can
805 if (state
->dr_control_mirror
!= 0)
806 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
808 lwp
->arch_private
->debug_registers_changed
= 0;
811 if (clear_status
|| lwp
->stop_reason
== LWP_STOPPED_BY_WATCHPOINT
)
812 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
815 /* When GDBSERVER is built as a 64-bit application on linux, the
816 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
817 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
818 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
819 conversion in-place ourselves. */
821 /* These types below (compat_*) define a siginfo type that is layout
822 compatible with the siginfo type exported by the 32-bit userspace
827 typedef int compat_int_t
;
828 typedef unsigned int compat_uptr_t
;
830 typedef int compat_time_t
;
831 typedef int compat_timer_t
;
832 typedef int compat_clock_t
;
834 struct compat_timeval
836 compat_time_t tv_sec
;
840 typedef union compat_sigval
842 compat_int_t sival_int
;
843 compat_uptr_t sival_ptr
;
846 typedef struct compat_siginfo
854 int _pad
[((128 / sizeof (int)) - 3)];
863 /* POSIX.1b timers */
868 compat_sigval_t _sigval
;
871 /* POSIX.1b signals */
876 compat_sigval_t _sigval
;
885 compat_clock_t _utime
;
886 compat_clock_t _stime
;
889 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
904 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
905 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
907 typedef struct compat_x32_siginfo
915 int _pad
[((128 / sizeof (int)) - 3)];
924 /* POSIX.1b timers */
929 compat_sigval_t _sigval
;
932 /* POSIX.1b signals */
937 compat_sigval_t _sigval
;
946 compat_x32_clock_t _utime
;
947 compat_x32_clock_t _stime
;
950 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
963 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
/* Shorthand accessors for members of the _sifields union inside the
   compat_siginfo/compat_x32_siginfo structures above, mirroring the
   kernel's si_* convenience macros for the native siginfo_t.  */
965 #define cpt_si_pid _sifields._kill._pid
966 #define cpt_si_uid _sifields._kill._uid
967 #define cpt_si_timerid _sifields._timer._tid
968 #define cpt_si_overrun _sifields._timer._overrun
969 #define cpt_si_status _sifields._sigchld._status
970 #define cpt_si_utime _sifields._sigchld._utime
971 #define cpt_si_stime _sifields._sigchld._stime
972 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
973 #define cpt_si_addr _sifields._sigfault._addr
974 #define cpt_si_band _sifields._sigpoll._band
975 #define cpt_si_fd _sifields._sigpoll._fd
977 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
978 In their place is si_timer1,si_timer2. */
980 #define si_timerid si_timer1
983 #define si_overrun si_timer2
987 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
989 memset (to
, 0, sizeof (*to
));
991 to
->si_signo
= from
->si_signo
;
992 to
->si_errno
= from
->si_errno
;
993 to
->si_code
= from
->si_code
;
995 if (to
->si_code
== SI_TIMER
)
997 to
->cpt_si_timerid
= from
->si_timerid
;
998 to
->cpt_si_overrun
= from
->si_overrun
;
999 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1001 else if (to
->si_code
== SI_USER
)
1003 to
->cpt_si_pid
= from
->si_pid
;
1004 to
->cpt_si_uid
= from
->si_uid
;
1006 else if (to
->si_code
< 0)
1008 to
->cpt_si_pid
= from
->si_pid
;
1009 to
->cpt_si_uid
= from
->si_uid
;
1010 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1014 switch (to
->si_signo
)
1017 to
->cpt_si_pid
= from
->si_pid
;
1018 to
->cpt_si_uid
= from
->si_uid
;
1019 to
->cpt_si_status
= from
->si_status
;
1020 to
->cpt_si_utime
= from
->si_utime
;
1021 to
->cpt_si_stime
= from
->si_stime
;
1027 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1030 to
->cpt_si_band
= from
->si_band
;
1031 to
->cpt_si_fd
= from
->si_fd
;
1034 to
->cpt_si_pid
= from
->si_pid
;
1035 to
->cpt_si_uid
= from
->si_uid
;
1036 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1043 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1045 memset (to
, 0, sizeof (*to
));
1047 to
->si_signo
= from
->si_signo
;
1048 to
->si_errno
= from
->si_errno
;
1049 to
->si_code
= from
->si_code
;
1051 if (to
->si_code
== SI_TIMER
)
1053 to
->si_timerid
= from
->cpt_si_timerid
;
1054 to
->si_overrun
= from
->cpt_si_overrun
;
1055 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1057 else if (to
->si_code
== SI_USER
)
1059 to
->si_pid
= from
->cpt_si_pid
;
1060 to
->si_uid
= from
->cpt_si_uid
;
1062 else if (to
->si_code
< 0)
1064 to
->si_pid
= from
->cpt_si_pid
;
1065 to
->si_uid
= from
->cpt_si_uid
;
1066 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1070 switch (to
->si_signo
)
1073 to
->si_pid
= from
->cpt_si_pid
;
1074 to
->si_uid
= from
->cpt_si_uid
;
1075 to
->si_status
= from
->cpt_si_status
;
1076 to
->si_utime
= from
->cpt_si_utime
;
1077 to
->si_stime
= from
->cpt_si_stime
;
1083 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1086 to
->si_band
= from
->cpt_si_band
;
1087 to
->si_fd
= from
->cpt_si_fd
;
1090 to
->si_pid
= from
->cpt_si_pid
;
1091 to
->si_uid
= from
->cpt_si_uid
;
1092 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1099 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1102 memset (to
, 0, sizeof (*to
));
1104 to
->si_signo
= from
->si_signo
;
1105 to
->si_errno
= from
->si_errno
;
1106 to
->si_code
= from
->si_code
;
1108 if (to
->si_code
== SI_TIMER
)
1110 to
->cpt_si_timerid
= from
->si_timerid
;
1111 to
->cpt_si_overrun
= from
->si_overrun
;
1112 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1114 else if (to
->si_code
== SI_USER
)
1116 to
->cpt_si_pid
= from
->si_pid
;
1117 to
->cpt_si_uid
= from
->si_uid
;
1119 else if (to
->si_code
< 0)
1121 to
->cpt_si_pid
= from
->si_pid
;
1122 to
->cpt_si_uid
= from
->si_uid
;
1123 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1127 switch (to
->si_signo
)
1130 to
->cpt_si_pid
= from
->si_pid
;
1131 to
->cpt_si_uid
= from
->si_uid
;
1132 to
->cpt_si_status
= from
->si_status
;
1133 to
->cpt_si_utime
= from
->si_utime
;
1134 to
->cpt_si_stime
= from
->si_stime
;
1140 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1143 to
->cpt_si_band
= from
->si_band
;
1144 to
->cpt_si_fd
= from
->si_fd
;
1147 to
->cpt_si_pid
= from
->si_pid
;
1148 to
->cpt_si_uid
= from
->si_uid
;
1149 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1156 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1157 compat_x32_siginfo_t
*from
)
1159 memset (to
, 0, sizeof (*to
));
1161 to
->si_signo
= from
->si_signo
;
1162 to
->si_errno
= from
->si_errno
;
1163 to
->si_code
= from
->si_code
;
1165 if (to
->si_code
== SI_TIMER
)
1167 to
->si_timerid
= from
->cpt_si_timerid
;
1168 to
->si_overrun
= from
->cpt_si_overrun
;
1169 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1171 else if (to
->si_code
== SI_USER
)
1173 to
->si_pid
= from
->cpt_si_pid
;
1174 to
->si_uid
= from
->cpt_si_uid
;
1176 else if (to
->si_code
< 0)
1178 to
->si_pid
= from
->cpt_si_pid
;
1179 to
->si_uid
= from
->cpt_si_uid
;
1180 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1184 switch (to
->si_signo
)
1187 to
->si_pid
= from
->cpt_si_pid
;
1188 to
->si_uid
= from
->cpt_si_uid
;
1189 to
->si_status
= from
->cpt_si_status
;
1190 to
->si_utime
= from
->cpt_si_utime
;
1191 to
->si_stime
= from
->cpt_si_stime
;
1197 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1200 to
->si_band
= from
->cpt_si_band
;
1201 to
->si_fd
= from
->cpt_si_fd
;
1204 to
->si_pid
= from
->cpt_si_pid
;
1205 to
->si_uid
= from
->cpt_si_uid
;
1206 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1212 #endif /* __x86_64__ */
1214 /* Convert a native/host siginfo object, into/from the siginfo in the
1215 layout of the inferiors' architecture. Returns true if any
1216 conversion was done; false otherwise. If DIRECTION is 1, then copy
1217 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1221 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1224 unsigned int machine
;
1225 int tid
= lwpid_of (current_thread
);
1226 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1228 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1229 if (!is_64bit_tdesc ())
1231 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1234 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1236 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1240 /* No fixup for native x32 GDB. */
1241 else if (!is_elf64
&& sizeof (void *) == 8)
1243 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1246 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1249 siginfo_from_compat_x32_siginfo (native
,
1250 (struct compat_x32_siginfo
*) inf
);
1261 /* Format of XSAVE extended state is:
1264 fxsave_bytes[0..463]
1265 sw_usable_bytes[464..511]
1266 xstate_hdr_bytes[512..575]
1271 Same memory layout will be used for the coredump NT_X86_XSTATE
1272 representing the XSAVE extended state registers.
1274 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1275 extended state mask, which is the same as the extended control register
1276 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1277 together with the mask saved in the xstate_hdr_bytes to determine what
1278 states the processor/OS supports and what state, used or initialized,
1279 the process/thread is in. */
1280 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1282 /* Does the current host support the GETFPXREGS request? The header
1283 file may or may not define it, and even if it is defined, the
1284 kernel will return EIO if it's running on a pre-SSE processor. */
1285 int have_ptrace_getfpxregs
=
1286 #ifdef HAVE_PTRACE_GETFPXREGS
1293 /* Does the current host support PTRACE_GETREGSET? */
1294 static int have_ptrace_getregset
= -1;
1296 /* Get Linux/x86 target description from running target. */
1298 static const struct target_desc
*
1299 x86_linux_read_description (void)
1301 unsigned int machine
;
1305 static uint64_t xcr0
;
1306 struct regset_info
*regset
;
1308 tid
= lwpid_of (current_thread
);
1310 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1312 if (sizeof (void *) == 4)
1315 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1317 else if (machine
== EM_X86_64
)
1318 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1322 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1323 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1325 elf_fpxregset_t fpxregs
;
1327 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1329 have_ptrace_getfpxregs
= 0;
1330 have_ptrace_getregset
= 0;
1331 return tdesc_i386_mmx_linux
;
1334 have_ptrace_getfpxregs
= 1;
1340 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1342 /* Don't use XML. */
1344 if (machine
== EM_X86_64
)
1345 return tdesc_amd64_linux_no_xml
;
1348 return tdesc_i386_linux_no_xml
;
1351 if (have_ptrace_getregset
== -1)
1353 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1356 iov
.iov_base
= xstateregs
;
1357 iov
.iov_len
= sizeof (xstateregs
);
1359 /* Check if PTRACE_GETREGSET works. */
1360 if (ptrace (PTRACE_GETREGSET
, tid
,
1361 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1362 have_ptrace_getregset
= 0;
1365 have_ptrace_getregset
= 1;
1367 /* Get XCR0 from XSAVE extended state. */
1368 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1369 / sizeof (uint64_t))];
1371 /* Use PTRACE_GETREGSET if it is available. */
1372 for (regset
= x86_regsets
;
1373 regset
->fill_function
!= NULL
; regset
++)
1374 if (regset
->get_request
== PTRACE_GETREGSET
)
1375 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1376 else if (regset
->type
!= GENERAL_REGS
)
1381 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1382 xcr0_features
= (have_ptrace_getregset
1383 && (xcr0
& X86_XSTATE_ALL_MASK
));
1388 if (machine
== EM_X86_64
)
1395 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1397 case X86_XSTATE_AVX512_MASK
:
1398 return tdesc_amd64_avx512_linux
;
1400 case X86_XSTATE_MPX_MASK
:
1401 return tdesc_amd64_mpx_linux
;
1403 case X86_XSTATE_AVX_MASK
:
1404 return tdesc_amd64_avx_linux
;
1407 return tdesc_amd64_linux
;
1411 return tdesc_amd64_linux
;
1417 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1419 case X86_XSTATE_AVX512_MASK
:
1420 return tdesc_x32_avx512_linux
;
1422 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1423 case X86_XSTATE_AVX_MASK
:
1424 return tdesc_x32_avx_linux
;
1427 return tdesc_x32_linux
;
1431 return tdesc_x32_linux
;
1439 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1441 case (X86_XSTATE_AVX512_MASK
):
1442 return tdesc_i386_avx512_linux
;
1444 case (X86_XSTATE_MPX_MASK
):
1445 return tdesc_i386_mpx_linux
;
1447 case (X86_XSTATE_AVX_MASK
):
1448 return tdesc_i386_avx_linux
;
1451 return tdesc_i386_linux
;
1455 return tdesc_i386_linux
;
1458 gdb_assert_not_reached ("failed to return tdesc");
1461 /* Callback for find_inferior. Stops iteration when a thread with a
1462 given PID is found. */
1465 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1467 int pid
= *(int *) data
;
1469 return (ptid_get_pid (entry
->id
) == pid
);
1472 /* Callback for for_each_inferior. Calls the arch_setup routine for
1476 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1478 int pid
= ptid_get_pid (entry
->id
);
1480 /* Look up any thread of this processes. */
1482 = (struct thread_info
*) find_inferior (&all_threads
,
1483 same_process_callback
, &pid
);
1485 the_low_target
.arch_setup ();
1488 /* Update all the target description of all processes; a new GDB
1489 connected, and it may or not support xml target descriptions. */
1492 x86_linux_update_xmltarget (void)
1494 struct thread_info
*saved_thread
= current_thread
;
1496 /* Before changing the register cache's internal layout, flush the
1497 contents of the current valid caches back to the threads, and
1498 release the current regcache objects. */
1499 regcache_release ();
1501 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1503 current_thread
= saved_thread
;
1506 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1507 PTRACE_GETREGSET. */
1510 x86_linux_process_qsupported (const char *query
)
1512 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1513 with "i386" in qSupported query, it supports x86 XML target
1516 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1518 char *copy
= xstrdup (query
+ 13);
1521 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1523 if (strcmp (p
, "i386") == 0)
1533 x86_linux_update_xmltarget ();
1536 /* Common for x86/x86-64. */
/* Regset table shared by the 32-bit and 64-bit register layouts.
   num_regsets starts at 0 here — presumably counted at initialization
   time; TODO confirm against the regsets machinery.  */
1538 static struct regsets_info x86_regsets_info
=
1540 x86_regsets
, /* regsets */
1541 0, /* num_regsets */
1542 NULL
, /* disabled_regsets */
/* 64-bit layout: no usrregs_info — register access goes through the
   shared regsets table only.  */
1546 static struct regs_info amd64_linux_regs_info
=
1548 NULL
, /* regset_bitmap */
1549 NULL
, /* usrregs_info */
1553 static struct usrregs_info i386_linux_usrregs_info
=
/* 32-bit layout: provides a usrregs_info in addition to the shared
   regsets table.  */
1559 static struct regs_info i386_linux_regs_info
=
1561 NULL
, /* regset_bitmap */
1562 &i386_linux_usrregs_info
,
1566 const struct regs_info
*
1567 x86_linux_regs_info (void)
1570 if (is_64bit_tdesc ())
1571 return &amd64_linux_regs_info
;
1574 return &i386_linux_regs_info
;
1577 /* Initialize the target description for the architecture of the
1581 x86_arch_setup (void)
1583 current_process ()->tdesc
= x86_linux_read_description ();
/* Fast tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1593 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1595 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of hexadecimal byte values ("48 83 ec 18"), and
   append the bytes to BUF.  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *cursor = op;

  for (;;)
    {
      char *after;
      unsigned long byte = strtoul (cursor, &after, 16);

      /* strtoul leaves AFTER == CURSOR when no digits were consumed;
	 that marks the end of the opcode string.  */
      if (after == cursor)
	break;

      *out++ = (unsigned char) byte;
      cursor = after;
    }

  return out - buf;
}
1621 /* Build a jump pad that saves registers and calls a collection
1622 function. Writes a jump instruction to the jump pad to
1623 JJUMPAD_INSN. The caller is responsible to write it in at the
1624 tracepoint address. */
1627 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1628 CORE_ADDR collector
,
1631 CORE_ADDR
*jump_entry
,
1632 CORE_ADDR
*trampoline
,
1633 ULONGEST
*trampoline_size
,
1634 unsigned char *jjump_pad_insn
,
1635 ULONGEST
*jjump_pad_insn_size
,
1636 CORE_ADDR
*adjusted_insn_addr
,
1637 CORE_ADDR
*adjusted_insn_addr_end
,
1640 unsigned char buf
[40];
1644 CORE_ADDR buildaddr
= *jump_entry
;
1646 /* Build the jump pad. */
1648 /* First, do tracepoint data collection. Save registers. */
1650 /* Need to ensure stack pointer saved first. */
1651 buf
[i
++] = 0x54; /* push %rsp */
1652 buf
[i
++] = 0x55; /* push %rbp */
1653 buf
[i
++] = 0x57; /* push %rdi */
1654 buf
[i
++] = 0x56; /* push %rsi */
1655 buf
[i
++] = 0x52; /* push %rdx */
1656 buf
[i
++] = 0x51; /* push %rcx */
1657 buf
[i
++] = 0x53; /* push %rbx */
1658 buf
[i
++] = 0x50; /* push %rax */
1659 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1660 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1661 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1662 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1663 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1664 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1665 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1666 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1667 buf
[i
++] = 0x9c; /* pushfq */
1668 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1670 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1671 i
+= sizeof (unsigned long);
1672 buf
[i
++] = 0x57; /* push %rdi */
1673 append_insns (&buildaddr
, i
, buf
);
1675 /* Stack space for the collecting_t object. */
1677 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1678 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1679 memcpy (buf
+ i
, &tpoint
, 8);
1681 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1682 i
+= push_opcode (&buf
[i
],
1683 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1684 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1685 append_insns (&buildaddr
, i
, buf
);
1689 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1690 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1692 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1693 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1694 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1695 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1696 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1697 append_insns (&buildaddr
, i
, buf
);
1699 /* Set up the gdb_collect call. */
1700 /* At this point, (stack pointer + 0x18) is the base of our saved
1704 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1705 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1707 /* tpoint address may be 64-bit wide. */
1708 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1709 memcpy (buf
+ i
, &tpoint
, 8);
1711 append_insns (&buildaddr
, i
, buf
);
1713 /* The collector function being in the shared library, may be
1714 >31-bits away off the jump pad. */
1716 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1717 memcpy (buf
+ i
, &collector
, 8);
1719 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1720 append_insns (&buildaddr
, i
, buf
);
1722 /* Clear the spin-lock. */
1724 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1725 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1726 memcpy (buf
+ i
, &lockaddr
, 8);
1728 append_insns (&buildaddr
, i
, buf
);
1730 /* Remove stack that had been used for the collect_t object. */
1732 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1733 append_insns (&buildaddr
, i
, buf
);
1735 /* Restore register state. */
1737 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1741 buf
[i
++] = 0x9d; /* popfq */
1742 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1743 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1744 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1745 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1746 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1747 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1748 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1749 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1750 buf
[i
++] = 0x58; /* pop %rax */
1751 buf
[i
++] = 0x5b; /* pop %rbx */
1752 buf
[i
++] = 0x59; /* pop %rcx */
1753 buf
[i
++] = 0x5a; /* pop %rdx */
1754 buf
[i
++] = 0x5e; /* pop %rsi */
1755 buf
[i
++] = 0x5f; /* pop %rdi */
1756 buf
[i
++] = 0x5d; /* pop %rbp */
1757 buf
[i
++] = 0x5c; /* pop %rsp */
1758 append_insns (&buildaddr
, i
, buf
);
1760 /* Now, adjust the original instruction to execute in the jump
1762 *adjusted_insn_addr
= buildaddr
;
1763 relocate_instruction (&buildaddr
, tpaddr
);
1764 *adjusted_insn_addr_end
= buildaddr
;
1766 /* Finally, write a jump back to the program. */
1768 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1769 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1772 "E.Jump back from jump pad too far from tracepoint "
1773 "(offset 0x%" PRIx64
" > int32).", loffset
);
1777 offset
= (int) loffset
;
1778 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1779 memcpy (buf
+ 1, &offset
, 4);
1780 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1782 /* The jump pad is now built. Wire in a jump to our jump pad. This
1783 is always done last (by our caller actually), so that we can
1784 install fast tracepoints with threads running. This relies on
1785 the agent's atomic write support. */
1786 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1787 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1790 "E.Jump pad too far from tracepoint "
1791 "(offset 0x%" PRIx64
" > int32).", loffset
);
1795 offset
= (int) loffset
;
1797 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1798 memcpy (buf
+ 1, &offset
, 4);
1799 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1800 *jjump_pad_insn_size
= sizeof (jump_insn
);
1802 /* Return the end address of our pad. */
1803 *jump_entry
= buildaddr
;
1808 #endif /* __x86_64__ */
1810 /* Build a jump pad that saves registers and calls a collection
1811 function. Writes a jump instruction to the jump pad to
1812 JJUMPAD_INSN. The caller is responsible to write it in at the
1813 tracepoint address. */
1816 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1817 CORE_ADDR collector
,
1820 CORE_ADDR
*jump_entry
,
1821 CORE_ADDR
*trampoline
,
1822 ULONGEST
*trampoline_size
,
1823 unsigned char *jjump_pad_insn
,
1824 ULONGEST
*jjump_pad_insn_size
,
1825 CORE_ADDR
*adjusted_insn_addr
,
1826 CORE_ADDR
*adjusted_insn_addr_end
,
1829 unsigned char buf
[0x100];
1831 CORE_ADDR buildaddr
= *jump_entry
;
1833 /* Build the jump pad. */
1835 /* First, do tracepoint data collection. Save registers. */
1837 buf
[i
++] = 0x60; /* pushad */
1838 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1839 *((int *)(buf
+ i
)) = (int) tpaddr
;
1841 buf
[i
++] = 0x9c; /* pushf */
1842 buf
[i
++] = 0x1e; /* push %ds */
1843 buf
[i
++] = 0x06; /* push %es */
1844 buf
[i
++] = 0x0f; /* push %fs */
1846 buf
[i
++] = 0x0f; /* push %gs */
1848 buf
[i
++] = 0x16; /* push %ss */
1849 buf
[i
++] = 0x0e; /* push %cs */
1850 append_insns (&buildaddr
, i
, buf
);
1852 /* Stack space for the collecting_t object. */
1854 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1856 /* Build the object. */
1857 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1858 memcpy (buf
+ i
, &tpoint
, 4);
1860 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1862 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1863 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1864 append_insns (&buildaddr
, i
, buf
);
1866 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1867 If we cared for it, this could be using xchg alternatively. */
1870 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1871 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1873 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1875 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1876 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1877 append_insns (&buildaddr
, i
, buf
);
1880 /* Set up arguments to the gdb_collect call. */
1882 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1883 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1884 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1885 append_insns (&buildaddr
, i
, buf
);
1888 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1889 append_insns (&buildaddr
, i
, buf
);
1892 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1893 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1895 append_insns (&buildaddr
, i
, buf
);
1897 buf
[0] = 0xe8; /* call <reladdr> */
1898 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1899 memcpy (buf
+ 1, &offset
, 4);
1900 append_insns (&buildaddr
, 5, buf
);
1901 /* Clean up after the call. */
1902 buf
[0] = 0x83; /* add $0x8,%esp */
1905 append_insns (&buildaddr
, 3, buf
);
1908 /* Clear the spin-lock. This would need the LOCK prefix on older
1911 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1912 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1913 memcpy (buf
+ i
, &lockaddr
, 4);
1915 append_insns (&buildaddr
, i
, buf
);
1918 /* Remove stack that had been used for the collect_t object. */
1920 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1921 append_insns (&buildaddr
, i
, buf
);
1924 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1927 buf
[i
++] = 0x17; /* pop %ss */
1928 buf
[i
++] = 0x0f; /* pop %gs */
1930 buf
[i
++] = 0x0f; /* pop %fs */
1932 buf
[i
++] = 0x07; /* pop %es */
1933 buf
[i
++] = 0x1f; /* pop %ds */
1934 buf
[i
++] = 0x9d; /* popf */
1935 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1938 buf
[i
++] = 0x61; /* popad */
1939 append_insns (&buildaddr
, i
, buf
);
1941 /* Now, adjust the original instruction to execute in the jump
1943 *adjusted_insn_addr
= buildaddr
;
1944 relocate_instruction (&buildaddr
, tpaddr
);
1945 *adjusted_insn_addr_end
= buildaddr
;
1947 /* Write the jump back to the program. */
1948 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1949 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1950 memcpy (buf
+ 1, &offset
, 4);
1951 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1953 /* The jump pad is now built. Wire in a jump to our jump pad. This
1954 is always done last (by our caller actually), so that we can
1955 install fast tracepoints with threads running. This relies on
1956 the agent's atomic write support. */
1959 /* Create a trampoline. */
1960 *trampoline_size
= sizeof (jump_insn
);
1961 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1963 /* No trampoline space available. */
1965 "E.Cannot allocate trampoline space needed for fast "
1966 "tracepoints on 4-byte instructions.");
1970 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1971 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1972 memcpy (buf
+ 1, &offset
, 4);
1973 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1975 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1976 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1977 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1978 memcpy (buf
+ 2, &offset
, 2);
1979 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1980 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1984 /* Else use a 32-bit relative jump instruction. */
1985 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1986 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1987 memcpy (buf
+ 1, &offset
, 4);
1988 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1989 *jjump_pad_insn_size
= sizeof (jump_insn
);
1992 /* Return the end address of our pad. */
1993 *jump_entry
= buildaddr
;
1999 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2000 CORE_ADDR collector
,
2003 CORE_ADDR
*jump_entry
,
2004 CORE_ADDR
*trampoline
,
2005 ULONGEST
*trampoline_size
,
2006 unsigned char *jjump_pad_insn
,
2007 ULONGEST
*jjump_pad_insn_size
,
2008 CORE_ADDR
*adjusted_insn_addr
,
2009 CORE_ADDR
*adjusted_insn_addr_end
,
2013 if (is_64bit_tdesc ())
2014 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2015 collector
, lockaddr
,
2016 orig_size
, jump_entry
,
2017 trampoline
, trampoline_size
,
2019 jjump_pad_insn_size
,
2021 adjusted_insn_addr_end
,
2025 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2026 collector
, lockaddr
,
2027 orig_size
, jump_entry
,
2028 trampoline
, trampoline_size
,
2030 jjump_pad_insn_size
,
2032 adjusted_insn_addr_end
,
2036 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2040 x86_get_min_fast_tracepoint_insn_len (void)
2042 static int warned_about_fast_tracepoints
= 0;
2045 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2046 used for fast tracepoints. */
2047 if (is_64bit_tdesc ())
2051 if (agent_loaded_p ())
2053 char errbuf
[IPA_BUFSIZ
];
2057 /* On x86, if trampolines are available, then 4-byte jump instructions
2058 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2059 with a 4-byte offset are used instead. */
2060 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2064 /* GDB has no channel to explain to user why a shorter fast
2065 tracepoint is not possible, but at least make GDBserver
2066 mention that something has gone awry. */
2067 if (!warned_about_fast_tracepoints
)
2069 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2070 warned_about_fast_tracepoints
= 1;
2077 /* Indicate that the minimum length is currently unknown since the IPA
2078 has not loaded yet. */
2084 add_insns (unsigned char *start
, int len
)
2086 CORE_ADDR buildaddr
= current_insn_ptr
;
2089 debug_printf ("Adding %d bytes of insn at %s\n",
2090 len
, paddress (buildaddr
));
2092 append_insns (&buildaddr
, len
, start
);
2093 current_insn_ptr
= buildaddr
;
2096 /* Our general strategy for emitting code is to avoid specifying raw
2097 bytes whenever possible, and instead copy a block of inline asm
2098 that is embedded in the function. This is a little messy, because
2099 we need to keep the compiler from discarding what looks like dead
2100 code, plus suppress various warnings. */
/* EMIT_ASM: copy the machine code between the start_NAME and end_NAME
   asm labels into the inferior via add_insns; the jmp over the block
   keeps the host from executing it in place.  */
2102 #define EMIT_ASM(NAME, INSNS) \
2105 extern unsigned char start_ ## NAME, end_ ## NAME; \
2106 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2107 __asm__ ("jmp end_" #NAME "\n" \
2108 "\t" "start_" #NAME ":" \
2110 "\t" "end_" #NAME ":"); \
/* 32-bit variant: on a 64-bit build the block is assembled with
   .code32 so the emitted bytes are i386 encodings.  */
2115 #define EMIT_ASM32(NAME,INSNS) \
2118 extern unsigned char start_ ## NAME, end_ ## NAME; \
2119 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2120 __asm__ (".code32\n" \
2121 "\t" "jmp end_" #NAME "\n" \
2122 "\t" "start_" #NAME ":\n" \
2124 "\t" "end_" #NAME ":\n" \
/* On a 32-bit build EMIT_ASM32 is just EMIT_ASM.  */
2130 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2137 amd64_emit_prologue (void)
2139 EMIT_ASM (amd64_prologue
,
2141 "movq %rsp,%rbp\n\t"
2142 "sub $0x20,%rsp\n\t"
2143 "movq %rdi,-8(%rbp)\n\t"
2144 "movq %rsi,-16(%rbp)");
2149 amd64_emit_epilogue (void)
2151 EMIT_ASM (amd64_epilogue
,
2152 "movq -16(%rbp),%rdi\n\t"
2153 "movq %rax,(%rdi)\n\t"
2160 amd64_emit_add (void)
2162 EMIT_ASM (amd64_add
,
2163 "add (%rsp),%rax\n\t"
2164 "lea 0x8(%rsp),%rsp");
2168 amd64_emit_sub (void)
2170 EMIT_ASM (amd64_sub
,
2171 "sub %rax,(%rsp)\n\t"
2176 amd64_emit_mul (void)
2182 amd64_emit_lsh (void)
2188 amd64_emit_rsh_signed (void)
2194 amd64_emit_rsh_unsigned (void)
2200 amd64_emit_ext (int arg
)
2205 EMIT_ASM (amd64_ext_8
,
2211 EMIT_ASM (amd64_ext_16
,
2216 EMIT_ASM (amd64_ext_32
,
2225 amd64_emit_log_not (void)
2227 EMIT_ASM (amd64_log_not
,
2228 "test %rax,%rax\n\t"
2234 amd64_emit_bit_and (void)
2236 EMIT_ASM (amd64_and
,
2237 "and (%rsp),%rax\n\t"
2238 "lea 0x8(%rsp),%rsp");
2242 amd64_emit_bit_or (void)
2245 "or (%rsp),%rax\n\t"
2246 "lea 0x8(%rsp),%rsp");
2250 amd64_emit_bit_xor (void)
2252 EMIT_ASM (amd64_xor
,
2253 "xor (%rsp),%rax\n\t"
2254 "lea 0x8(%rsp),%rsp");
2258 amd64_emit_bit_not (void)
2260 EMIT_ASM (amd64_bit_not
,
2261 "xorq $0xffffffffffffffff,%rax");
2265 amd64_emit_equal (void)
2267 EMIT_ASM (amd64_equal
,
2268 "cmp %rax,(%rsp)\n\t"
2269 "je .Lamd64_equal_true\n\t"
2271 "jmp .Lamd64_equal_end\n\t"
2272 ".Lamd64_equal_true:\n\t"
2274 ".Lamd64_equal_end:\n\t"
2275 "lea 0x8(%rsp),%rsp");
2279 amd64_emit_less_signed (void)
2281 EMIT_ASM (amd64_less_signed
,
2282 "cmp %rax,(%rsp)\n\t"
2283 "jl .Lamd64_less_signed_true\n\t"
2285 "jmp .Lamd64_less_signed_end\n\t"
2286 ".Lamd64_less_signed_true:\n\t"
2288 ".Lamd64_less_signed_end:\n\t"
2289 "lea 0x8(%rsp),%rsp");
2293 amd64_emit_less_unsigned (void)
2295 EMIT_ASM (amd64_less_unsigned
,
2296 "cmp %rax,(%rsp)\n\t"
2297 "jb .Lamd64_less_unsigned_true\n\t"
2299 "jmp .Lamd64_less_unsigned_end\n\t"
2300 ".Lamd64_less_unsigned_true:\n\t"
2302 ".Lamd64_less_unsigned_end:\n\t"
2303 "lea 0x8(%rsp),%rsp");
2307 amd64_emit_ref (int size
)
2312 EMIT_ASM (amd64_ref1
,
2316 EMIT_ASM (amd64_ref2
,
2320 EMIT_ASM (amd64_ref4
,
2321 "movl (%rax),%eax");
2324 EMIT_ASM (amd64_ref8
,
2325 "movq (%rax),%rax");
2331 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2333 EMIT_ASM (amd64_if_goto
,
2337 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2345 amd64_emit_goto (int *offset_p
, int *size_p
)
2347 EMIT_ASM (amd64_goto
,
2348 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2356 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2358 int diff
= (to
- (from
+ size
));
2359 unsigned char buf
[sizeof (int)];
2367 memcpy (buf
, &diff
, sizeof (int));
2368 write_inferior_memory (from
, buf
, sizeof (int));
2372 amd64_emit_const (LONGEST num
)
2374 unsigned char buf
[16];
2376 CORE_ADDR buildaddr
= current_insn_ptr
;
2379 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2380 memcpy (&buf
[i
], &num
, sizeof (num
));
2382 append_insns (&buildaddr
, i
, buf
);
2383 current_insn_ptr
= buildaddr
;
2387 amd64_emit_call (CORE_ADDR fn
)
2389 unsigned char buf
[16];
2391 CORE_ADDR buildaddr
;
2394 /* The destination function being in the shared library, may be
2395 >31-bits away off the compiled code pad. */
2397 buildaddr
= current_insn_ptr
;
2399 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2403 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2405 /* Offset is too large for a call. Use callq, but that requires
2406 a register, so avoid it if possible. Use r10, since it is
2407 call-clobbered, we don't have to push/pop it. */
2408 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2410 memcpy (buf
+ i
, &fn
, 8);
2412 buf
[i
++] = 0xff; /* callq *%r10 */
2417 int offset32
= offset64
; /* we know we can't overflow here. */
2418 memcpy (buf
+ i
, &offset32
, 4);
2422 append_insns (&buildaddr
, i
, buf
);
2423 current_insn_ptr
= buildaddr
;
2427 amd64_emit_reg (int reg
)
2429 unsigned char buf
[16];
2431 CORE_ADDR buildaddr
;
2433 /* Assume raw_regs is still in %rdi. */
2434 buildaddr
= current_insn_ptr
;
2436 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2437 memcpy (&buf
[i
], ®
, sizeof (reg
));
2439 append_insns (&buildaddr
, i
, buf
);
2440 current_insn_ptr
= buildaddr
;
2441 amd64_emit_call (get_raw_reg_func_addr ());
2445 amd64_emit_pop (void)
2447 EMIT_ASM (amd64_pop
,
2452 amd64_emit_stack_flush (void)
2454 EMIT_ASM (amd64_stack_flush
,
2459 amd64_emit_zero_ext (int arg
)
2464 EMIT_ASM (amd64_zero_ext_8
,
2468 EMIT_ASM (amd64_zero_ext_16
,
2469 "and $0xffff,%rax");
2472 EMIT_ASM (amd64_zero_ext_32
,
2473 "mov $0xffffffff,%rcx\n\t"
2482 amd64_emit_swap (void)
2484 EMIT_ASM (amd64_swap
,
2491 amd64_emit_stack_adjust (int n
)
2493 unsigned char buf
[16];
2495 CORE_ADDR buildaddr
= current_insn_ptr
;
2498 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2502 /* This only handles adjustments up to 16, but we don't expect any more. */
2504 append_insns (&buildaddr
, i
, buf
);
2505 current_insn_ptr
= buildaddr
;
2508 /* FN's prototype is `LONGEST(*fn)(int)'. */
2511 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2513 unsigned char buf
[16];
2515 CORE_ADDR buildaddr
;
2517 buildaddr
= current_insn_ptr
;
2519 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2520 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2522 append_insns (&buildaddr
, i
, buf
);
2523 current_insn_ptr
= buildaddr
;
2524 amd64_emit_call (fn
);
2527 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2530 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2532 unsigned char buf
[16];
2534 CORE_ADDR buildaddr
;
2536 buildaddr
= current_insn_ptr
;
2538 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2539 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2541 append_insns (&buildaddr
, i
, buf
);
2542 current_insn_ptr
= buildaddr
;
2543 EMIT_ASM (amd64_void_call_2_a
,
2544 /* Save away a copy of the stack top. */
2546 /* Also pass top as the second argument. */
2548 amd64_emit_call (fn
);
2549 EMIT_ASM (amd64_void_call_2_b
,
2550 /* Restore the stack top, %rax may have been trashed. */
2555 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2558 "cmp %rax,(%rsp)\n\t"
2559 "jne .Lamd64_eq_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_eq_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2575 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2578 "cmp %rax,(%rsp)\n\t"
2579 "je .Lamd64_ne_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_ne_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2595 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnl .Lamd64_lt_fallthru\n\t"
2600 "lea 0x8(%rsp),%rsp\n\t"
2602 /* jmp, but don't trust the assembler to choose the right jump */
2603 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2604 ".Lamd64_lt_fallthru:\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2615 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2618 "cmp %rax,(%rsp)\n\t"
2619 "jnle .Lamd64_le_fallthru\n\t"
2620 "lea 0x8(%rsp),%rsp\n\t"
2622 /* jmp, but don't trust the assembler to choose the right jump */
2623 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2624 ".Lamd64_le_fallthru:\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2635 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2638 "cmp %rax,(%rsp)\n\t"
2639 "jng .Lamd64_gt_fallthru\n\t"
2640 "lea 0x8(%rsp),%rsp\n\t"
2642 /* jmp, but don't trust the assembler to choose the right jump */
2643 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2644 ".Lamd64_gt_fallthru:\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2655 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2658 "cmp %rax,(%rsp)\n\t"
2659 "jnge .Lamd64_ge_fallthru\n\t"
2660 ".Lamd64_ge_jump:\n\t"
2661 "lea 0x8(%rsp),%rsp\n\t"
2663 /* jmp, but don't trust the assembler to choose the right jump */
2664 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2665 ".Lamd64_ge_fallthru:\n\t"
2666 "lea 0x8(%rsp),%rsp\n\t"
2675 struct emit_ops amd64_emit_ops
=
2677 amd64_emit_prologue
,
2678 amd64_emit_epilogue
,
2683 amd64_emit_rsh_signed
,
2684 amd64_emit_rsh_unsigned
,
2692 amd64_emit_less_signed
,
2693 amd64_emit_less_unsigned
,
2697 amd64_write_goto_address
,
2702 amd64_emit_stack_flush
,
2703 amd64_emit_zero_ext
,
2705 amd64_emit_stack_adjust
,
2706 amd64_emit_int_call_1
,
2707 amd64_emit_void_call_2
,
2716 #endif /* __x86_64__ */
2719 i386_emit_prologue (void)
2721 EMIT_ASM32 (i386_prologue
,
2725 /* At this point, the raw regs base address is at 8(%ebp), and the
2726 value pointer is at 12(%ebp). */
2730 i386_emit_epilogue (void)
2732 EMIT_ASM32 (i386_epilogue
,
2733 "mov 12(%ebp),%ecx\n\t"
2734 "mov %eax,(%ecx)\n\t"
2735 "mov %ebx,0x4(%ecx)\n\t"
2743 i386_emit_add (void)
2745 EMIT_ASM32 (i386_add
,
2746 "add (%esp),%eax\n\t"
2747 "adc 0x4(%esp),%ebx\n\t"
2748 "lea 0x8(%esp),%esp");
2752 i386_emit_sub (void)
2754 EMIT_ASM32 (i386_sub
,
2755 "subl %eax,(%esp)\n\t"
2756 "sbbl %ebx,4(%esp)\n\t"
2762 i386_emit_mul (void)
2768 i386_emit_lsh (void)
2774 i386_emit_rsh_signed (void)
2780 i386_emit_rsh_unsigned (void)
2786 i386_emit_ext (int arg
)
2791 EMIT_ASM32 (i386_ext_8
,
2794 "movl %eax,%ebx\n\t"
2798 EMIT_ASM32 (i386_ext_16
,
2800 "movl %eax,%ebx\n\t"
2804 EMIT_ASM32 (i386_ext_32
,
2805 "movl %eax,%ebx\n\t"
2814 i386_emit_log_not (void)
2816 EMIT_ASM32 (i386_log_not
,
2818 "test %eax,%eax\n\t"
2825 i386_emit_bit_and (void)
2827 EMIT_ASM32 (i386_and
,
2828 "and (%esp),%eax\n\t"
2829 "and 0x4(%esp),%ebx\n\t"
2830 "lea 0x8(%esp),%esp");
2834 i386_emit_bit_or (void)
2836 EMIT_ASM32 (i386_or
,
2837 "or (%esp),%eax\n\t"
2838 "or 0x4(%esp),%ebx\n\t"
2839 "lea 0x8(%esp),%esp");
2843 i386_emit_bit_xor (void)
2845 EMIT_ASM32 (i386_xor
,
2846 "xor (%esp),%eax\n\t"
2847 "xor 0x4(%esp),%ebx\n\t"
2848 "lea 0x8(%esp),%esp");
2852 i386_emit_bit_not (void)
2854 EMIT_ASM32 (i386_bit_not
,
2855 "xor $0xffffffff,%eax\n\t"
2856 "xor $0xffffffff,%ebx\n\t");
2860 i386_emit_equal (void)
2862 EMIT_ASM32 (i386_equal
,
2863 "cmpl %ebx,4(%esp)\n\t"
2864 "jne .Li386_equal_false\n\t"
2865 "cmpl %eax,(%esp)\n\t"
2866 "je .Li386_equal_true\n\t"
2867 ".Li386_equal_false:\n\t"
2869 "jmp .Li386_equal_end\n\t"
2870 ".Li386_equal_true:\n\t"
2872 ".Li386_equal_end:\n\t"
2874 "lea 0x8(%esp),%esp");
2878 i386_emit_less_signed (void)
2880 EMIT_ASM32 (i386_less_signed
,
2881 "cmpl %ebx,4(%esp)\n\t"
2882 "jl .Li386_less_signed_true\n\t"
2883 "jne .Li386_less_signed_false\n\t"
2884 "cmpl %eax,(%esp)\n\t"
2885 "jl .Li386_less_signed_true\n\t"
2886 ".Li386_less_signed_false:\n\t"
2888 "jmp .Li386_less_signed_end\n\t"
2889 ".Li386_less_signed_true:\n\t"
2891 ".Li386_less_signed_end:\n\t"
2893 "lea 0x8(%esp),%esp");
2897 i386_emit_less_unsigned (void)
2899 EMIT_ASM32 (i386_less_unsigned
,
2900 "cmpl %ebx,4(%esp)\n\t"
2901 "jb .Li386_less_unsigned_true\n\t"
2902 "jne .Li386_less_unsigned_false\n\t"
2903 "cmpl %eax,(%esp)\n\t"
2904 "jb .Li386_less_unsigned_true\n\t"
2905 ".Li386_less_unsigned_false:\n\t"
2907 "jmp .Li386_less_unsigned_end\n\t"
2908 ".Li386_less_unsigned_true:\n\t"
2910 ".Li386_less_unsigned_end:\n\t"
2912 "lea 0x8(%esp),%esp");
2916 i386_emit_ref (int size
)
2921 EMIT_ASM32 (i386_ref1
,
2925 EMIT_ASM32 (i386_ref2
,
2929 EMIT_ASM32 (i386_ref4
,
2930 "movl (%eax),%eax");
2933 EMIT_ASM32 (i386_ref8
,
2934 "movl 4(%eax),%ebx\n\t"
2935 "movl (%eax),%eax");
2941 i386_emit_if_goto (int *offset_p
, int *size_p
)
2943 EMIT_ASM32 (i386_if_goto
,
2949 /* Don't trust the assembler to choose the right jump */
2950 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2953 *offset_p
= 11; /* be sure that this matches the sequence above */
2959 i386_emit_goto (int *offset_p
, int *size_p
)
2961 EMIT_ASM32 (i386_goto
,
2962 /* Don't trust the assembler to choose the right jump */
2963 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2971 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2973 int diff
= (to
- (from
+ size
));
2974 unsigned char buf
[sizeof (int)];
2976 /* We're only doing 4-byte sizes at the moment. */
2983 memcpy (buf
, &diff
, sizeof (int));
2984 write_inferior_memory (from
, buf
, sizeof (int));
2988 i386_emit_const (LONGEST num
)
2990 unsigned char buf
[16];
2992 CORE_ADDR buildaddr
= current_insn_ptr
;
2995 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2996 lo
= num
& 0xffffffff;
2997 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2999 hi
= ((num
>> 32) & 0xffffffff);
3002 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3003 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3008 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3010 append_insns (&buildaddr
, i
, buf
);
3011 current_insn_ptr
= buildaddr
;
3015 i386_emit_call (CORE_ADDR fn
)
3017 unsigned char buf
[16];
3019 CORE_ADDR buildaddr
;
3021 buildaddr
= current_insn_ptr
;
3023 buf
[i
++] = 0xe8; /* call <reladdr> */
3024 offset
= ((int) fn
) - (buildaddr
+ 5);
3025 memcpy (buf
+ 1, &offset
, 4);
3026 append_insns (&buildaddr
, 5, buf
);
3027 current_insn_ptr
= buildaddr
;
3031 i386_emit_reg (int reg
)
3033 unsigned char buf
[16];
3035 CORE_ADDR buildaddr
;
3037 EMIT_ASM32 (i386_reg_a
,
3039 buildaddr
= current_insn_ptr
;
3041 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3042 memcpy (&buf
[i
], ®
, sizeof (reg
));
3044 append_insns (&buildaddr
, i
, buf
);
3045 current_insn_ptr
= buildaddr
;
3046 EMIT_ASM32 (i386_reg_b
,
3047 "mov %eax,4(%esp)\n\t"
3048 "mov 8(%ebp),%eax\n\t"
3050 i386_emit_call (get_raw_reg_func_addr ());
3051 EMIT_ASM32 (i386_reg_c
,
3053 "lea 0x8(%esp),%esp");
3057 i386_emit_pop (void)
3059 EMIT_ASM32 (i386_pop
,
3065 i386_emit_stack_flush (void)
3067 EMIT_ASM32 (i386_stack_flush
,
3073 i386_emit_zero_ext (int arg
)
3078 EMIT_ASM32 (i386_zero_ext_8
,
3079 "and $0xff,%eax\n\t"
3083 EMIT_ASM32 (i386_zero_ext_16
,
3084 "and $0xffff,%eax\n\t"
3088 EMIT_ASM32 (i386_zero_ext_32
,
3097 i386_emit_swap (void)
3099 EMIT_ASM32 (i386_swap
,
3109 i386_emit_stack_adjust (int n
)
3111 unsigned char buf
[16];
3113 CORE_ADDR buildaddr
= current_insn_ptr
;
3116 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3120 append_insns (&buildaddr
, i
, buf
);
3121 current_insn_ptr
= buildaddr
;
3124 /* FN's prototype is `LONGEST(*fn)(int)'. */
3127 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3129 unsigned char buf
[16];
3131 CORE_ADDR buildaddr
;
3133 EMIT_ASM32 (i386_int_call_1_a
,
3134 /* Reserve a bit of stack space. */
3136 /* Put the one argument on the stack. */
3137 buildaddr
= current_insn_ptr
;
3139 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3142 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3144 append_insns (&buildaddr
, i
, buf
);
3145 current_insn_ptr
= buildaddr
;
3146 i386_emit_call (fn
);
3147 EMIT_ASM32 (i386_int_call_1_c
,
3149 "lea 0x8(%esp),%esp");
3152 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3155 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3157 unsigned char buf
[16];
3159 CORE_ADDR buildaddr
;
3161 EMIT_ASM32 (i386_void_call_2_a
,
3162 /* Preserve %eax only; we don't have to worry about %ebx. */
3164 /* Reserve a bit of stack space for arguments. */
3165 "sub $0x10,%esp\n\t"
3166 /* Copy "top" to the second argument position. (Note that
3167 we can't assume function won't scribble on its
3168 arguments, so don't try to restore from this.) */
3169 "mov %eax,4(%esp)\n\t"
3170 "mov %ebx,8(%esp)");
3171 /* Put the first argument on the stack. */
3172 buildaddr
= current_insn_ptr
;
3174 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3177 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3179 append_insns (&buildaddr
, i
, buf
);
3180 current_insn_ptr
= buildaddr
;
3181 i386_emit_call (fn
);
3182 EMIT_ASM32 (i386_void_call_2_b
,
3183 "lea 0x10(%esp),%esp\n\t"
3184 /* Restore original stack top. */
3190 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3193 /* Check low half first, more likely to be decider */
3194 "cmpl %eax,(%esp)\n\t"
3195 "jne .Leq_fallthru\n\t"
3196 "cmpl %ebx,4(%esp)\n\t"
3197 "jne .Leq_fallthru\n\t"
3198 "lea 0x8(%esp),%esp\n\t"
3201 /* jmp, but don't trust the assembler to choose the right jump */
3202 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3203 ".Leq_fallthru:\n\t"
3204 "lea 0x8(%esp),%esp\n\t"
3215 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3218 /* Check low half first, more likely to be decider */
3219 "cmpl %eax,(%esp)\n\t"
3221 "cmpl %ebx,4(%esp)\n\t"
3222 "je .Lne_fallthru\n\t"
3224 "lea 0x8(%esp),%esp\n\t"
3227 /* jmp, but don't trust the assembler to choose the right jump */
3228 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3229 ".Lne_fallthru:\n\t"
3230 "lea 0x8(%esp),%esp\n\t"
3241 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3244 "cmpl %ebx,4(%esp)\n\t"
3246 "jne .Llt_fallthru\n\t"
3247 "cmpl %eax,(%esp)\n\t"
3248 "jnl .Llt_fallthru\n\t"
3250 "lea 0x8(%esp),%esp\n\t"
3253 /* jmp, but don't trust the assembler to choose the right jump */
3254 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3255 ".Llt_fallthru:\n\t"
3256 "lea 0x8(%esp),%esp\n\t"
3267 i386_emit_le_goto (int *offset_p
, int *size_p
)
3270 "cmpl %ebx,4(%esp)\n\t"
3272 "jne .Lle_fallthru\n\t"
3273 "cmpl %eax,(%esp)\n\t"
3274 "jnle .Lle_fallthru\n\t"
3276 "lea 0x8(%esp),%esp\n\t"
3279 /* jmp, but don't trust the assembler to choose the right jump */
3280 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3281 ".Lle_fallthru:\n\t"
3282 "lea 0x8(%esp),%esp\n\t"
3293 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3296 "cmpl %ebx,4(%esp)\n\t"
3298 "jne .Lgt_fallthru\n\t"
3299 "cmpl %eax,(%esp)\n\t"
3300 "jng .Lgt_fallthru\n\t"
3302 "lea 0x8(%esp),%esp\n\t"
3305 /* jmp, but don't trust the assembler to choose the right jump */
3306 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3307 ".Lgt_fallthru:\n\t"
3308 "lea 0x8(%esp),%esp\n\t"
3319 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3322 "cmpl %ebx,4(%esp)\n\t"
3324 "jne .Lge_fallthru\n\t"
3325 "cmpl %eax,(%esp)\n\t"
3326 "jnge .Lge_fallthru\n\t"
3328 "lea 0x8(%esp),%esp\n\t"
3331 /* jmp, but don't trust the assembler to choose the right jump */
3332 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3333 ".Lge_fallthru:\n\t"
3334 "lea 0x8(%esp),%esp\n\t"
3344 struct emit_ops i386_emit_ops
=
3352 i386_emit_rsh_signed
,
3353 i386_emit_rsh_unsigned
,
3361 i386_emit_less_signed
,
3362 i386_emit_less_unsigned
,
3366 i386_write_goto_address
,
3371 i386_emit_stack_flush
,
3374 i386_emit_stack_adjust
,
3375 i386_emit_int_call_1
,
3376 i386_emit_void_call_2
,
3386 static struct emit_ops
*
3390 if (is_64bit_tdesc ())
3391 return &amd64_emit_ops
;
3394 return &i386_emit_ops
;
3398 x86_supports_range_stepping (void)
3403 /* This is initialized assuming an amd64 target.
3404 x86_arch_setup will correct it for i386 or amd64 targets. */
3406 struct linux_target_ops the_low_target
=
3409 x86_linux_regs_info
,
3410 x86_cannot_fetch_register
,
3411 x86_cannot_store_register
,
3412 NULL
, /* fetch_register */
3420 x86_supports_z_point_type
,
3423 x86_stopped_by_watchpoint
,
3424 x86_stopped_data_address
,
3425 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3426 native i386 case (no registers smaller than an xfer unit), and are not
3427 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3430 /* need to fix up i386 siginfo if host is amd64 */
3432 x86_linux_new_process
,
3433 x86_linux_new_thread
,
3434 x86_linux_prepare_to_resume
,
3435 x86_linux_process_qsupported
,
3436 x86_supports_tracepoints
,
3437 x86_get_thread_area
,
3438 x86_install_fast_tracepoint_jump_pad
,
3440 x86_get_min_fast_tracepoint_insn_len
,
3441 x86_supports_range_stepping
,
3445 initialize_low_arch (void)
3447 /* Initialize the Linux target descriptions. */
3449 init_registers_amd64_linux ();
3450 init_registers_amd64_avx_linux ();
3451 init_registers_amd64_avx512_linux ();
3452 init_registers_amd64_mpx_linux ();
3454 init_registers_x32_linux ();
3455 init_registers_x32_avx_linux ();
3456 init_registers_x32_avx512_linux ();
3458 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3459 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3460 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3462 init_registers_i386_linux ();
3463 init_registers_i386_mmx_linux ();
3464 init_registers_i386_avx_linux ();
3465 init_registers_i386_avx512_linux ();
3466 init_registers_i386_mpx_linux ();
3468 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3469 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3470 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3472 initialize_regsets_info (&x86_regsets_info
);