1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
44 /* Defined in auto-generated file amd64-linux.c. */
45 void init_registers_amd64_linux (void);
46 extern const struct target_desc
*tdesc_amd64_linux
;
48 /* Defined in auto-generated file amd64-avx-linux.c. */
49 void init_registers_amd64_avx_linux (void);
50 extern const struct target_desc
*tdesc_amd64_avx_linux
;
52 /* Defined in auto-generated file amd64-avx512-linux.c. */
53 void init_registers_amd64_avx512_linux (void);
54 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
56 /* Defined in auto-generated file amd64-mpx-linux.c. */
57 void init_registers_amd64_mpx_linux (void);
58 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
60 /* Defined in auto-generated file x32-linux.c. */
61 void init_registers_x32_linux (void);
62 extern const struct target_desc
*tdesc_x32_linux
;
64 /* Defined in auto-generated file x32-avx-linux.c. */
65 void init_registers_x32_avx_linux (void);
66 extern const struct target_desc
*tdesc_x32_avx_linux
;
68 /* Defined in auto-generated file x32-avx512-linux.c. */
69 void init_registers_x32_avx512_linux (void);
70 extern const struct target_desc
*tdesc_x32_avx512_linux
;
74 /* Defined in auto-generated file i386-linux.c. */
75 void init_registers_i386_linux (void);
76 extern const struct target_desc
*tdesc_i386_linux
;
78 /* Defined in auto-generated file i386-mmx-linux.c. */
79 void init_registers_i386_mmx_linux (void);
80 extern const struct target_desc
*tdesc_i386_mmx_linux
;
82 /* Defined in auto-generated file i386-avx-linux.c. */
83 void init_registers_i386_avx_linux (void);
84 extern const struct target_desc
*tdesc_i386_avx_linux
;
86 /* Defined in auto-generated file i386-avx512-linux.c. */
87 void init_registers_i386_avx512_linux (void);
88 extern const struct target_desc
*tdesc_i386_avx512_linux
;
90 /* Defined in auto-generated file i386-mpx-linux.c. */
91 void init_registers_i386_mpx_linux (void);
92 extern const struct target_desc
*tdesc_i386_mpx_linux
;
95 static struct target_desc
*tdesc_amd64_linux_no_xml
;
97 static struct target_desc
*tdesc_i386_linux_no_xml
;
100 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
101 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
103 /* Backward compatibility for gdb without XML support. */
105 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
106 <architecture>i386</architecture>\
107 <osabi>GNU/Linux</osabi>\
111 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
112 <architecture>i386:x86-64</architecture>\
113 <osabi>GNU/Linux</osabi>\
118 #include <sys/procfs.h>
119 #include <sys/ptrace.h>
122 #ifndef PTRACE_GETREGSET
123 #define PTRACE_GETREGSET 0x4204
126 #ifndef PTRACE_SETREGSET
127 #define PTRACE_SETREGSET 0x4205
131 #ifndef PTRACE_GET_THREAD_AREA
132 #define PTRACE_GET_THREAD_AREA 25
135 /* This definition comes from prctl.h, but some kernels may not have it. */
136 #ifndef PTRACE_ARCH_PRCTL
137 #define PTRACE_ARCH_PRCTL 30
140 /* The following definitions come from prctl.h, but may be absent
141 for certain configurations. */
143 #define ARCH_SET_GS 0x1001
144 #define ARCH_SET_FS 0x1002
145 #define ARCH_GET_FS 0x1003
146 #define ARCH_GET_GS 0x1004
149 /* Per-process arch-specific data we want to keep. */
151 struct arch_process_info
153 struct x86_debug_reg_state debug_reg_state
;
158 /* Mapping between the general-purpose registers in `struct user'
159 format and GDB's register array layout.
160 Note that the transfer layout uses 64-bit regs. */
161 static /*const*/ int i386_regmap
[] =
163 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
164 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
165 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
166 DS
* 8, ES
* 8, FS
* 8, GS
* 8
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
171 /* So code below doesn't have to care, i386 or amd64. */
172 #define ORIG_EAX ORIG_RAX
175 static const int x86_64_regmap
[] =
177 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
178 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
179 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
180 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
181 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
182 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
187 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
190 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
191 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1
202 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
203 #define X86_64_USER_REGS (GS + 1)
205 #else /* ! __x86_64__ */
207 /* Mapping between the general-purpose registers in `struct user'
208 format and GDB's register array layout. */
209 static /*const*/ int i386_regmap
[] =
211 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
212 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
213 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
214 DS
* 4, ES
* 4, FS
* 4, GS
* 4
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225 /* Returns true if the current inferior belongs to a x86-64 process,
229 is_64bit_tdesc (void)
231 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
233 return register_size (regcache
->tdesc
, 0) == 8;
239 /* Called by libthread_db. */
242 ps_get_thread_area (const struct ps_prochandle
*ph
,
243 lwpid_t lwpid
, int idx
, void **base
)
246 int use_64bit
= is_64bit_tdesc ();
253 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
268 unsigned int desc
[4];
270 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
271 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
274 /* Ensure we properly extend the value to 64-bits for x86_64. */
275 *base
= (void *) (uintptr_t) desc
[1];
280 /* Get the thread area address. This is used to recognize which
281 thread is which when tracing with the in-process agent library. We
282 don't read anything from the address, and treat it as opaque; it's
283 the address itself that we assume is unique per-thread. */
286 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
289 int use_64bit
= is_64bit_tdesc ();
294 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
296 *addr
= (CORE_ADDR
) (uintptr_t) base
;
305 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
306 struct thread_info
*thr
= get_lwp_thread (lwp
);
307 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
308 unsigned int desc
[4];
310 const int reg_thread_area
= 3; /* bits to scale down register value. */
313 collect_register_by_name (regcache
, "gs", &gs
);
315 idx
= gs
>> reg_thread_area
;
317 if (ptrace (PTRACE_GET_THREAD_AREA
,
319 (void *) (long) idx
, (unsigned long) &desc
) < 0)
330 x86_cannot_store_register (int regno
)
333 if (is_64bit_tdesc ())
337 return regno
>= I386_NUM_REGS
;
341 x86_cannot_fetch_register (int regno
)
344 if (is_64bit_tdesc ())
348 return regno
>= I386_NUM_REGS
;
352 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
357 if (register_size (regcache
->tdesc
, 0) == 8)
359 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
360 if (x86_64_regmap
[i
] != -1)
361 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
365 /* 32-bit inferior registers need to be zero-extended.
366 Callers would read uninitialized memory otherwise. */
367 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
370 for (i
= 0; i
< I386_NUM_REGS
; i
++)
371 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
373 collect_register_by_name (regcache
, "orig_eax",
374 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
378 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
383 if (register_size (regcache
->tdesc
, 0) == 8)
385 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
386 if (x86_64_regmap
[i
] != -1)
387 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
392 for (i
= 0; i
< I386_NUM_REGS
; i
++)
393 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
395 supply_register_by_name (regcache
, "orig_eax",
396 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Fill BUF (an FP regset in ptrace layout) from the regcache.  The
   64-bit build uses the fxsave layout; the 32-bit build uses the
   older fsave layout.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Store the FP regset in BUF (ptrace layout) into the regcache.
   Counterpart of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
422 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
424 i387_cache_to_fxsave (regcache
, buf
);
428 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
430 i387_fxsave_to_cache (regcache
, buf
);
/* Fill BUF with the XSAVE extended state from the regcache.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Store the XSAVE extended state in BUF into the regcache.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
447 /* ??? The non-biarch i386 case stores all the i387 regs twice.
448 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
449 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
450 doesn't work. IWBN to avoid the duplication in the case where it
451 does work. Maybe the arch_setup routine could check whether it works
452 and update the supported regsets accordingly. */
454 static struct regset_info x86_regsets
[] =
456 #ifdef HAVE_PTRACE_GETREGS
457 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
459 x86_fill_gregset
, x86_store_gregset
},
460 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
461 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
463 # ifdef HAVE_PTRACE_GETFPXREGS
464 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
466 x86_fill_fpxregset
, x86_store_fpxregset
},
469 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
471 x86_fill_fpregset
, x86_store_fpregset
},
472 #endif /* HAVE_PTRACE_GETREGS */
473 { 0, 0, 0, -1, -1, NULL
, NULL
}
477 x86_get_pc (struct regcache
*regcache
)
479 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
484 collect_register_by_name (regcache
, "rip", &pc
);
485 return (CORE_ADDR
) pc
;
490 collect_register_by_name (regcache
, "eip", &pc
);
491 return (CORE_ADDR
) pc
;
496 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
498 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
502 unsigned long newpc
= pc
;
503 supply_register_by_name (regcache
, "rip", &newpc
);
507 unsigned int newpc
= pc
;
508 supply_register_by_name (regcache
, "eip", &newpc
);
512 static const unsigned char x86_breakpoint
[] = { 0xCC };
513 #define x86_breakpoint_len 1
516 x86_breakpoint_at (CORE_ADDR pc
)
520 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of debug register REGNUM within the u_debugreg
   array of `struct user', for use with PTRACE_PEEKUSER/POKEUSER.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
          + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
539 /* Support for debug registers. */
542 x86_linux_dr_get (ptid_t ptid
, int regnum
)
547 tid
= ptid_get_lwp (ptid
);
550 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
552 error ("Couldn't read debug register");
558 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
562 tid
= ptid_get_lwp (ptid
);
565 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
567 error ("Couldn't write debug register");
/* Callback for iterate_over_lwps: flag LWP as needing a debug
   register refresh, stopping it first if it is running.  Always
   returns 0 so the iteration visits every LWP.  */

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  /* The actual update is done later just before resuming the lwp,
     we just mark that the registers need updating.  */
  lwp_set_debug_registers_changed (lwp, 1);

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  /* Continue the iteration.  */
  return 0;
}
585 /* Update the inferior's debug register REGNUM from STATE. */
588 x86_linux_dr_set_addr (int regnum
, CORE_ADDR addr
)
590 /* Only update the threads of this process. */
591 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
593 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
595 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
598 /* Return the inferior's debug register REGNUM. */
601 x86_linux_dr_get_addr (int regnum
)
603 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
605 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
608 /* Update the inferior's DR7 debug control register from STATE. */
611 x86_linux_dr_set_control (unsigned long control
)
613 /* Only update the threads of this process. */
614 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
616 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
619 /* Return the inferior's DR7 debug control register. */
622 x86_linux_dr_get_control (void)
624 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
627 /* Get the value of the DR6 debug status register from the inferior
628 and record it in STATE. */
631 x86_linux_dr_get_status (void)
633 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
636 /* Low-level function vector. */
637 struct x86_dr_low_type x86_dr_low
=
639 x86_linux_dr_set_control
,
640 x86_linux_dr_set_addr
,
641 x86_linux_dr_get_addr
,
642 x86_linux_dr_get_status
,
643 x86_linux_dr_get_control
,
647 /* Breakpoint/Watchpoint support. */
650 x86_supports_z_point_type (char z_type
)
656 case Z_PACKET_WRITE_WP
:
657 case Z_PACKET_ACCESS_WP
:
665 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
666 int size
, struct raw_breakpoint
*bp
)
668 struct process_info
*proc
= current_process ();
672 case raw_bkpt_type_sw
:
673 return insert_memory_breakpoint (bp
);
675 case raw_bkpt_type_hw
:
676 case raw_bkpt_type_write_wp
:
677 case raw_bkpt_type_access_wp
:
679 enum target_hw_bp_type hw_type
680 = raw_bkpt_type_to_target_hw_bp_type (type
);
681 struct x86_debug_reg_state
*state
682 = &proc
->priv
->arch_private
->debug_reg_state
;
684 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
694 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
695 int size
, struct raw_breakpoint
*bp
)
697 struct process_info
*proc
= current_process ();
701 case raw_bkpt_type_sw
:
702 return remove_memory_breakpoint (bp
);
704 case raw_bkpt_type_hw
:
705 case raw_bkpt_type_write_wp
:
706 case raw_bkpt_type_access_wp
:
708 enum target_hw_bp_type hw_type
709 = raw_bkpt_type_to_target_hw_bp_type (type
);
710 struct x86_debug_reg_state
*state
711 = &proc
->priv
->arch_private
->debug_reg_state
;
713 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
722 x86_stopped_by_watchpoint (void)
724 struct process_info
*proc
= current_process ();
725 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
729 x86_stopped_data_address (void)
731 struct process_info
*proc
= current_process ();
733 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
739 /* Called when a new process is created. */
741 static struct arch_process_info
*
742 x86_linux_new_process (void)
744 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
746 x86_low_init_dregs (&info
->debug_reg_state
);
/* Called when a new thread is detected.  Mark its debug registers as
   stale so they get synchronized before the thread next resumes.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  lwp_set_debug_registers_changed (lwp, 1);
}
759 /* See nat/x86-dregs.h. */
761 struct x86_debug_reg_state
*
762 x86_debug_reg_state (pid_t pid
)
764 struct process_info
*proc
= find_process_pid (pid
);
766 return &proc
->priv
->arch_private
->debug_reg_state
;
769 /* Called when resuming a thread.
770 If the debug regs have changed, update the thread's copies. */
773 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
775 ptid_t ptid
= ptid_of_lwp (lwp
);
776 int clear_status
= 0;
778 if (lwp_debug_registers_changed (lwp
))
780 struct x86_debug_reg_state
*state
781 = x86_debug_reg_state (ptid_get_pid (ptid
));
784 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
786 ALL_DEBUG_ADDRESS_REGISTERS (i
)
787 if (state
->dr_ref_count
[i
] > 0)
789 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
791 /* If we're setting a watchpoint, any change the inferior
792 had done itself to the debug registers needs to be
793 discarded, otherwise, x86_dr_stopped_data_address can
798 if (state
->dr_control_mirror
!= 0)
799 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
801 lwp_set_debug_registers_changed (lwp
, 0);
805 || lwp_stop_reason (lwp
) == TARGET_STOPPED_BY_WATCHPOINT
)
806 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
809 /* When GDBSERVER is built as a 64-bit application on linux, the
810 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
811 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
812 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
813 conversion in-place ourselves. */
815 /* These types below (compat_*) define a siginfo type that is layout
816 compatible with the siginfo type exported by the 32-bit userspace
821 typedef int compat_int_t
;
822 typedef unsigned int compat_uptr_t
;
824 typedef int compat_time_t
;
825 typedef int compat_timer_t
;
826 typedef int compat_clock_t
;
828 struct compat_timeval
830 compat_time_t tv_sec
;
834 typedef union compat_sigval
836 compat_int_t sival_int
;
837 compat_uptr_t sival_ptr
;
840 typedef struct compat_siginfo
848 int _pad
[((128 / sizeof (int)) - 3)];
857 /* POSIX.1b timers */
862 compat_sigval_t _sigval
;
865 /* POSIX.1b signals */
870 compat_sigval_t _sigval
;
879 compat_clock_t _utime
;
880 compat_clock_t _stime
;
883 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
898 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
899 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
901 typedef struct compat_x32_siginfo
909 int _pad
[((128 / sizeof (int)) - 3)];
918 /* POSIX.1b timers */
923 compat_sigval_t _sigval
;
926 /* POSIX.1b signals */
931 compat_sigval_t _sigval
;
940 compat_x32_clock_t _utime
;
941 compat_x32_clock_t _stime
;
944 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
957 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
959 #define cpt_si_pid _sifields._kill._pid
960 #define cpt_si_uid _sifields._kill._uid
961 #define cpt_si_timerid _sifields._timer._tid
962 #define cpt_si_overrun _sifields._timer._overrun
963 #define cpt_si_status _sifields._sigchld._status
964 #define cpt_si_utime _sifields._sigchld._utime
965 #define cpt_si_stime _sifields._sigchld._stime
966 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
967 #define cpt_si_addr _sifields._sigfault._addr
968 #define cpt_si_band _sifields._sigpoll._band
969 #define cpt_si_fd _sifields._sigpoll._fd
971 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
972 In their place is si_timer1,si_timer2. */
974 #define si_timerid si_timer1
977 #define si_overrun si_timer2
981 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
983 memset (to
, 0, sizeof (*to
));
985 to
->si_signo
= from
->si_signo
;
986 to
->si_errno
= from
->si_errno
;
987 to
->si_code
= from
->si_code
;
989 if (to
->si_code
== SI_TIMER
)
991 to
->cpt_si_timerid
= from
->si_timerid
;
992 to
->cpt_si_overrun
= from
->si_overrun
;
993 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
995 else if (to
->si_code
== SI_USER
)
997 to
->cpt_si_pid
= from
->si_pid
;
998 to
->cpt_si_uid
= from
->si_uid
;
1000 else if (to
->si_code
< 0)
1002 to
->cpt_si_pid
= from
->si_pid
;
1003 to
->cpt_si_uid
= from
->si_uid
;
1004 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1008 switch (to
->si_signo
)
1011 to
->cpt_si_pid
= from
->si_pid
;
1012 to
->cpt_si_uid
= from
->si_uid
;
1013 to
->cpt_si_status
= from
->si_status
;
1014 to
->cpt_si_utime
= from
->si_utime
;
1015 to
->cpt_si_stime
= from
->si_stime
;
1021 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1024 to
->cpt_si_band
= from
->si_band
;
1025 to
->cpt_si_fd
= from
->si_fd
;
1028 to
->cpt_si_pid
= from
->si_pid
;
1029 to
->cpt_si_uid
= from
->si_uid
;
1030 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1037 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1039 memset (to
, 0, sizeof (*to
));
1041 to
->si_signo
= from
->si_signo
;
1042 to
->si_errno
= from
->si_errno
;
1043 to
->si_code
= from
->si_code
;
1045 if (to
->si_code
== SI_TIMER
)
1047 to
->si_timerid
= from
->cpt_si_timerid
;
1048 to
->si_overrun
= from
->cpt_si_overrun
;
1049 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1051 else if (to
->si_code
== SI_USER
)
1053 to
->si_pid
= from
->cpt_si_pid
;
1054 to
->si_uid
= from
->cpt_si_uid
;
1056 else if (to
->si_code
< 0)
1058 to
->si_pid
= from
->cpt_si_pid
;
1059 to
->si_uid
= from
->cpt_si_uid
;
1060 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1064 switch (to
->si_signo
)
1067 to
->si_pid
= from
->cpt_si_pid
;
1068 to
->si_uid
= from
->cpt_si_uid
;
1069 to
->si_status
= from
->cpt_si_status
;
1070 to
->si_utime
= from
->cpt_si_utime
;
1071 to
->si_stime
= from
->cpt_si_stime
;
1077 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1080 to
->si_band
= from
->cpt_si_band
;
1081 to
->si_fd
= from
->cpt_si_fd
;
1084 to
->si_pid
= from
->cpt_si_pid
;
1085 to
->si_uid
= from
->cpt_si_uid
;
1086 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1093 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1096 memset (to
, 0, sizeof (*to
));
1098 to
->si_signo
= from
->si_signo
;
1099 to
->si_errno
= from
->si_errno
;
1100 to
->si_code
= from
->si_code
;
1102 if (to
->si_code
== SI_TIMER
)
1104 to
->cpt_si_timerid
= from
->si_timerid
;
1105 to
->cpt_si_overrun
= from
->si_overrun
;
1106 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1108 else if (to
->si_code
== SI_USER
)
1110 to
->cpt_si_pid
= from
->si_pid
;
1111 to
->cpt_si_uid
= from
->si_uid
;
1113 else if (to
->si_code
< 0)
1115 to
->cpt_si_pid
= from
->si_pid
;
1116 to
->cpt_si_uid
= from
->si_uid
;
1117 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1121 switch (to
->si_signo
)
1124 to
->cpt_si_pid
= from
->si_pid
;
1125 to
->cpt_si_uid
= from
->si_uid
;
1126 to
->cpt_si_status
= from
->si_status
;
1127 to
->cpt_si_utime
= from
->si_utime
;
1128 to
->cpt_si_stime
= from
->si_stime
;
1134 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1137 to
->cpt_si_band
= from
->si_band
;
1138 to
->cpt_si_fd
= from
->si_fd
;
1141 to
->cpt_si_pid
= from
->si_pid
;
1142 to
->cpt_si_uid
= from
->si_uid
;
1143 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1150 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1151 compat_x32_siginfo_t
*from
)
1153 memset (to
, 0, sizeof (*to
));
1155 to
->si_signo
= from
->si_signo
;
1156 to
->si_errno
= from
->si_errno
;
1157 to
->si_code
= from
->si_code
;
1159 if (to
->si_code
== SI_TIMER
)
1161 to
->si_timerid
= from
->cpt_si_timerid
;
1162 to
->si_overrun
= from
->cpt_si_overrun
;
1163 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1165 else if (to
->si_code
== SI_USER
)
1167 to
->si_pid
= from
->cpt_si_pid
;
1168 to
->si_uid
= from
->cpt_si_uid
;
1170 else if (to
->si_code
< 0)
1172 to
->si_pid
= from
->cpt_si_pid
;
1173 to
->si_uid
= from
->cpt_si_uid
;
1174 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1178 switch (to
->si_signo
)
1181 to
->si_pid
= from
->cpt_si_pid
;
1182 to
->si_uid
= from
->cpt_si_uid
;
1183 to
->si_status
= from
->cpt_si_status
;
1184 to
->si_utime
= from
->cpt_si_utime
;
1185 to
->si_stime
= from
->cpt_si_stime
;
1191 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1194 to
->si_band
= from
->cpt_si_band
;
1195 to
->si_fd
= from
->cpt_si_fd
;
1198 to
->si_pid
= from
->cpt_si_pid
;
1199 to
->si_uid
= from
->cpt_si_uid
;
1200 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1206 #endif /* __x86_64__ */
1208 /* Convert a native/host siginfo object, into/from the siginfo in the
1209 layout of the inferiors' architecture. Returns true if any
1210 conversion was done; false otherwise. If DIRECTION is 1, then copy
1211 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1215 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1218 unsigned int machine
;
1219 int tid
= lwpid_of (current_thread
);
1220 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1222 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1223 if (!is_64bit_tdesc ())
1225 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1228 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1230 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1234 /* No fixup for native x32 GDB. */
1235 else if (!is_elf64
&& sizeof (void *) == 8)
1237 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1240 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1243 siginfo_from_compat_x32_siginfo (native
,
1244 (struct compat_x32_siginfo
*) inf
);
1255 /* Format of XSAVE extended state is:
1258 fxsave_bytes[0..463]
1259 sw_usable_bytes[464..511]
1260 xstate_hdr_bytes[512..575]
1265 Same memory layout will be used for the coredump NT_X86_XSTATE
1266 representing the XSAVE extended state registers.
1268 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1269 extended state mask, which is the same as the extended control register
1270 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1271 together with the mask saved in the xstate_hdr_bytes to determine what
1272 states the processor/OS supports and what state, used or initialized,
1273 the process/thread is in. */
1274 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1276 /* Does the current host support the GETFPXREGS request? The header
1277 file may or may not define it, and even if it is defined, the
1278 kernel will return EIO if it's running on a pre-SSE processor. */
1279 int have_ptrace_getfpxregs
=
1280 #ifdef HAVE_PTRACE_GETFPXREGS
1287 /* Does the current host support PTRACE_GETREGSET? */
1288 static int have_ptrace_getregset
= -1;
1290 /* Get Linux/x86 target description from running target. */
1292 static const struct target_desc
*
1293 x86_linux_read_description (void)
1295 unsigned int machine
;
1299 static uint64_t xcr0
;
1300 struct regset_info
*regset
;
1302 tid
= lwpid_of (current_thread
);
1304 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1306 if (sizeof (void *) == 4)
1309 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1311 else if (machine
== EM_X86_64
)
1312 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1316 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1317 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1319 elf_fpxregset_t fpxregs
;
1321 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1323 have_ptrace_getfpxregs
= 0;
1324 have_ptrace_getregset
= 0;
1325 return tdesc_i386_mmx_linux
;
1328 have_ptrace_getfpxregs
= 1;
1334 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1336 /* Don't use XML. */
1338 if (machine
== EM_X86_64
)
1339 return tdesc_amd64_linux_no_xml
;
1342 return tdesc_i386_linux_no_xml
;
1345 if (have_ptrace_getregset
== -1)
1347 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1350 iov
.iov_base
= xstateregs
;
1351 iov
.iov_len
= sizeof (xstateregs
);
1353 /* Check if PTRACE_GETREGSET works. */
1354 if (ptrace (PTRACE_GETREGSET
, tid
,
1355 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1356 have_ptrace_getregset
= 0;
1359 have_ptrace_getregset
= 1;
1361 /* Get XCR0 from XSAVE extended state. */
1362 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1363 / sizeof (uint64_t))];
1365 /* Use PTRACE_GETREGSET if it is available. */
1366 for (regset
= x86_regsets
;
1367 regset
->fill_function
!= NULL
; regset
++)
1368 if (regset
->get_request
== PTRACE_GETREGSET
)
1369 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1370 else if (regset
->type
!= GENERAL_REGS
)
1375 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1376 xcr0_features
= (have_ptrace_getregset
1377 && (xcr0
& X86_XSTATE_ALL_MASK
));
1382 if (machine
== EM_X86_64
)
1389 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1391 case X86_XSTATE_AVX512_MASK
:
1392 return tdesc_amd64_avx512_linux
;
1394 case X86_XSTATE_MPX_MASK
:
1395 return tdesc_amd64_mpx_linux
;
1397 case X86_XSTATE_AVX_MASK
:
1398 return tdesc_amd64_avx_linux
;
1401 return tdesc_amd64_linux
;
1405 return tdesc_amd64_linux
;
1411 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1413 case X86_XSTATE_AVX512_MASK
:
1414 return tdesc_x32_avx512_linux
;
1416 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1417 case X86_XSTATE_AVX_MASK
:
1418 return tdesc_x32_avx_linux
;
1421 return tdesc_x32_linux
;
1425 return tdesc_x32_linux
;
1433 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1435 case (X86_XSTATE_AVX512_MASK
):
1436 return tdesc_i386_avx512_linux
;
1438 case (X86_XSTATE_MPX_MASK
):
1439 return tdesc_i386_mpx_linux
;
1441 case (X86_XSTATE_AVX_MASK
):
1442 return tdesc_i386_avx_linux
;
1445 return tdesc_i386_linux
;
1449 return tdesc_i386_linux
;
1452 gdb_assert_not_reached ("failed to return tdesc");
1455 /* Callback for find_inferior. Stops iteration when a thread with a
1456 given PID is found. */
1459 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1461 int pid
= *(int *) data
;
1463 return (ptid_get_pid (entry
->id
) == pid
);
1466 /* Callback for for_each_inferior. Calls the arch_setup routine for
1470 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1472 int pid
= ptid_get_pid (entry
->id
);
1474 /* Look up any thread of this processes. */
1476 = (struct thread_info
*) find_inferior (&all_threads
,
1477 same_process_callback
, &pid
);
1479 the_low_target
.arch_setup ();
1482 /* Update all the target description of all processes; a new GDB
1483 connected, and it may or not support xml target descriptions. */
1486 x86_linux_update_xmltarget (void)
1488 struct thread_info
*saved_thread
= current_thread
;
1490 /* Before changing the register cache's internal layout, flush the
1491 contents of the current valid caches back to the threads, and
1492 release the current regcache objects. */
1493 regcache_release ();
1495 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1497 current_thread
= saved_thread
;
1500 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1501 PTRACE_GETREGSET. */
1504 x86_linux_process_qsupported (const char *query
)
1506 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1507 with "i386" in qSupported query, it supports x86 XML target
1510 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1512 char *copy
= xstrdup (query
+ 13);
1515 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1517 if (strcmp (p
, "i386") == 0)
1527 x86_linux_update_xmltarget ();
1530 /* Common for x86/x86-64. */
/* Regset table shared by the 32-bit and 64-bit flavors.  num_regsets
   is computed elsewhere (0 here means "fill in at init").
   NOTE(review): initializer braces and some trailing fields were
   dropped by the extraction.  */
1532 static struct regsets_info x86_regsets_info
=
1534 x86_regsets
, /* regsets */
1535 0, /* num_regsets */
1536 NULL
, /* disabled_regsets */
/* 64-bit register layout: regsets only, no usrregs fallback.  */
1540 static struct regs_info amd64_linux_regs_info
=
1542 NULL
, /* regset_bitmap */
1543 NULL
, /* usrregs_info */
/* 32-bit PTRACE_PEEKUSER register map.  NOTE(review): the field
   initializers of this struct were dropped by the extraction.  */
1547 static struct usrregs_info i386_linux_usrregs_info
=
/* 32-bit register layout: usrregs plus the shared regsets.  */
1553 static struct regs_info i386_linux_regs_info
=
1555 NULL
, /* regset_bitmap */
1556 &i386_linux_usrregs_info
,
/* Return the register layout description matching the current
   inferior: 64-bit regs-info when the tdesc is 64-bit, otherwise the
   32-bit one.  NOTE(review): the #ifdef __x86_64__ guard around the
   64-bit branch was dropped by the extraction -- confirm against the
   full file.  */
1560 const struct regs_info
*
1561 x86_linux_regs_info (void)
1564 if (is_64bit_tdesc ())
1565 return &amd64_linux_regs_info
;
1568 return &i386_linux_regs_info
;
1571 /* Initialize the target description for the architecture of the
1575 x86_arch_setup (void)
1577 current_process ()->tdesc
= x86_linux_read_description ();
/* Tracepoint-support predicate for the x86 low target.
   NOTE(review): the body (presumably a constant return) was dropped
   by the extraction -- confirm against the full file.  */
1581 x86_supports_tracepoints (void)
/* Write the LEN bytes at BUF into the inferior at address *TO.
   NOTE(review): the advance of *TO by LEN after the write is not
   visible in this extraction -- callers below rely on it, confirm
   against the full file.  */
1587 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1589 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of space-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), storing each byte into BUF.  Returns the
   number of bytes written (end pointer minus start pointer).
   NOTE(review): the parsing loop, the ENDPTR declaration and the
   store of each byte were dropped by the extraction.  */
1594 push_opcode (unsigned char *buf
, char *op
)
/* Remember the start so the byte count can be returned.  */
1596 unsigned char *buf_org
= buf
;
/* Each token is converted with base-16 strtoul.  */
1601 unsigned long ul
= strtoul (op
, &endptr
, 16);
1610 return buf
- buf_org
;
1615 /* Build a jump pad that saves registers and calls a collection
1616 function. Writes a jump instruction to the jump pad to
1617 JJUMPAD_INSN. The caller is responsible to write it in at the
1618 tracepoint address. */
1621 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1622 CORE_ADDR collector
,
1625 CORE_ADDR
*jump_entry
,
1626 CORE_ADDR
*trampoline
,
1627 ULONGEST
*trampoline_size
,
1628 unsigned char *jjump_pad_insn
,
1629 ULONGEST
*jjump_pad_insn_size
,
1630 CORE_ADDR
*adjusted_insn_addr
,
1631 CORE_ADDR
*adjusted_insn_addr_end
,
1634 unsigned char buf
[40];
1638 CORE_ADDR buildaddr
= *jump_entry
;
1640 /* Build the jump pad. */
1642 /* First, do tracepoint data collection. Save registers. */
1644 /* Need to ensure stack pointer saved first. */
1645 buf
[i
++] = 0x54; /* push %rsp */
1646 buf
[i
++] = 0x55; /* push %rbp */
1647 buf
[i
++] = 0x57; /* push %rdi */
1648 buf
[i
++] = 0x56; /* push %rsi */
1649 buf
[i
++] = 0x52; /* push %rdx */
1650 buf
[i
++] = 0x51; /* push %rcx */
1651 buf
[i
++] = 0x53; /* push %rbx */
1652 buf
[i
++] = 0x50; /* push %rax */
1653 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1654 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1655 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1656 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1657 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1658 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1659 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1660 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1661 buf
[i
++] = 0x9c; /* pushfq */
1662 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1664 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1665 i
+= sizeof (unsigned long);
1666 buf
[i
++] = 0x57; /* push %rdi */
1667 append_insns (&buildaddr
, i
, buf
);
1669 /* Stack space for the collecting_t object. */
1671 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1672 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1673 memcpy (buf
+ i
, &tpoint
, 8);
1675 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1676 i
+= push_opcode (&buf
[i
],
1677 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1678 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1679 append_insns (&buildaddr
, i
, buf
);
1683 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1684 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1686 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1687 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1688 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1689 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1690 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1691 append_insns (&buildaddr
, i
, buf
);
1693 /* Set up the gdb_collect call. */
1694 /* At this point, (stack pointer + 0x18) is the base of our saved
1698 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1699 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1701 /* tpoint address may be 64-bit wide. */
1702 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1703 memcpy (buf
+ i
, &tpoint
, 8);
1705 append_insns (&buildaddr
, i
, buf
);
1707 /* The collector function being in the shared library, may be
1708 >31-bits away off the jump pad. */
1710 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1711 memcpy (buf
+ i
, &collector
, 8);
1713 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1714 append_insns (&buildaddr
, i
, buf
);
1716 /* Clear the spin-lock. */
1718 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1719 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1720 memcpy (buf
+ i
, &lockaddr
, 8);
1722 append_insns (&buildaddr
, i
, buf
);
1724 /* Remove stack that had been used for the collect_t object. */
1726 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1727 append_insns (&buildaddr
, i
, buf
);
1729 /* Restore register state. */
1731 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1735 buf
[i
++] = 0x9d; /* popfq */
1736 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1737 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1738 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1739 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1740 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1741 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1742 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1743 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1744 buf
[i
++] = 0x58; /* pop %rax */
1745 buf
[i
++] = 0x5b; /* pop %rbx */
1746 buf
[i
++] = 0x59; /* pop %rcx */
1747 buf
[i
++] = 0x5a; /* pop %rdx */
1748 buf
[i
++] = 0x5e; /* pop %rsi */
1749 buf
[i
++] = 0x5f; /* pop %rdi */
1750 buf
[i
++] = 0x5d; /* pop %rbp */
1751 buf
[i
++] = 0x5c; /* pop %rsp */
1752 append_insns (&buildaddr
, i
, buf
);
1754 /* Now, adjust the original instruction to execute in the jump
1756 *adjusted_insn_addr
= buildaddr
;
1757 relocate_instruction (&buildaddr
, tpaddr
);
1758 *adjusted_insn_addr_end
= buildaddr
;
1760 /* Finally, write a jump back to the program. */
1762 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1763 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1766 "E.Jump back from jump pad too far from tracepoint "
1767 "(offset 0x%" PRIx64
" > int32).", loffset
);
1771 offset
= (int) loffset
;
1772 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1773 memcpy (buf
+ 1, &offset
, 4);
1774 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1776 /* The jump pad is now built. Wire in a jump to our jump pad. This
1777 is always done last (by our caller actually), so that we can
1778 install fast tracepoints with threads running. This relies on
1779 the agent's atomic write support. */
1780 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1781 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1784 "E.Jump pad too far from tracepoint "
1785 "(offset 0x%" PRIx64
" > int32).", loffset
);
1789 offset
= (int) loffset
;
1791 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1792 memcpy (buf
+ 1, &offset
, 4);
1793 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1794 *jjump_pad_insn_size
= sizeof (jump_insn
);
1796 /* Return the end address of our pad. */
1797 *jump_entry
= buildaddr
;
1802 #endif /* __x86_64__ */
1804 /* Build a jump pad that saves registers and calls a collection
1805 function. Writes a jump instruction to the jump pad to
1806 JJUMPAD_INSN. The caller is responsible to write it in at the
1807 tracepoint address. */
1810 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1811 CORE_ADDR collector
,
1814 CORE_ADDR
*jump_entry
,
1815 CORE_ADDR
*trampoline
,
1816 ULONGEST
*trampoline_size
,
1817 unsigned char *jjump_pad_insn
,
1818 ULONGEST
*jjump_pad_insn_size
,
1819 CORE_ADDR
*adjusted_insn_addr
,
1820 CORE_ADDR
*adjusted_insn_addr_end
,
1823 unsigned char buf
[0x100];
1825 CORE_ADDR buildaddr
= *jump_entry
;
1827 /* Build the jump pad. */
1829 /* First, do tracepoint data collection. Save registers. */
1831 buf
[i
++] = 0x60; /* pushad */
1832 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1833 *((int *)(buf
+ i
)) = (int) tpaddr
;
1835 buf
[i
++] = 0x9c; /* pushf */
1836 buf
[i
++] = 0x1e; /* push %ds */
1837 buf
[i
++] = 0x06; /* push %es */
1838 buf
[i
++] = 0x0f; /* push %fs */
1840 buf
[i
++] = 0x0f; /* push %gs */
1842 buf
[i
++] = 0x16; /* push %ss */
1843 buf
[i
++] = 0x0e; /* push %cs */
1844 append_insns (&buildaddr
, i
, buf
);
1846 /* Stack space for the collecting_t object. */
1848 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1850 /* Build the object. */
1851 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1852 memcpy (buf
+ i
, &tpoint
, 4);
1854 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1856 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1857 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1858 append_insns (&buildaddr
, i
, buf
);
1860 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1861 If we cared for it, this could be using xchg alternatively. */
1864 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1865 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1867 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1869 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1870 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1871 append_insns (&buildaddr
, i
, buf
);
1874 /* Set up arguments to the gdb_collect call. */
1876 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1877 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1878 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1879 append_insns (&buildaddr
, i
, buf
);
1882 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1883 append_insns (&buildaddr
, i
, buf
);
1886 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1887 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1889 append_insns (&buildaddr
, i
, buf
);
1891 buf
[0] = 0xe8; /* call <reladdr> */
1892 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1893 memcpy (buf
+ 1, &offset
, 4);
1894 append_insns (&buildaddr
, 5, buf
);
1895 /* Clean up after the call. */
1896 buf
[0] = 0x83; /* add $0x8,%esp */
1899 append_insns (&buildaddr
, 3, buf
);
1902 /* Clear the spin-lock. This would need the LOCK prefix on older
1905 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1906 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1907 memcpy (buf
+ i
, &lockaddr
, 4);
1909 append_insns (&buildaddr
, i
, buf
);
1912 /* Remove stack that had been used for the collect_t object. */
1914 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1915 append_insns (&buildaddr
, i
, buf
);
1918 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1921 buf
[i
++] = 0x17; /* pop %ss */
1922 buf
[i
++] = 0x0f; /* pop %gs */
1924 buf
[i
++] = 0x0f; /* pop %fs */
1926 buf
[i
++] = 0x07; /* pop %es */
1927 buf
[i
++] = 0x1f; /* pop %ds */
1928 buf
[i
++] = 0x9d; /* popf */
1929 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1932 buf
[i
++] = 0x61; /* popad */
1933 append_insns (&buildaddr
, i
, buf
);
1935 /* Now, adjust the original instruction to execute in the jump
1937 *adjusted_insn_addr
= buildaddr
;
1938 relocate_instruction (&buildaddr
, tpaddr
);
1939 *adjusted_insn_addr_end
= buildaddr
;
1941 /* Write the jump back to the program. */
1942 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1943 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1944 memcpy (buf
+ 1, &offset
, 4);
1945 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1947 /* The jump pad is now built. Wire in a jump to our jump pad. This
1948 is always done last (by our caller actually), so that we can
1949 install fast tracepoints with threads running. This relies on
1950 the agent's atomic write support. */
1953 /* Create a trampoline. */
1954 *trampoline_size
= sizeof (jump_insn
);
1955 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1957 /* No trampoline space available. */
1959 "E.Cannot allocate trampoline space needed for fast "
1960 "tracepoints on 4-byte instructions.");
1964 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1965 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1966 memcpy (buf
+ 1, &offset
, 4);
1967 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1969 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1970 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1971 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1972 memcpy (buf
+ 2, &offset
, 2);
1973 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1974 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1978 /* Else use a 32-bit relative jump instruction. */
1979 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1980 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1981 memcpy (buf
+ 1, &offset
, 4);
1982 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1983 *jjump_pad_insn_size
= sizeof (jump_insn
);
1986 /* Return the end address of our pad. */
1987 *jump_entry
= buildaddr
;
1993 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1994 CORE_ADDR collector
,
1997 CORE_ADDR
*jump_entry
,
1998 CORE_ADDR
*trampoline
,
1999 ULONGEST
*trampoline_size
,
2000 unsigned char *jjump_pad_insn
,
2001 ULONGEST
*jjump_pad_insn_size
,
2002 CORE_ADDR
*adjusted_insn_addr
,
2003 CORE_ADDR
*adjusted_insn_addr_end
,
2007 if (is_64bit_tdesc ())
2008 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2009 collector
, lockaddr
,
2010 orig_size
, jump_entry
,
2011 trampoline
, trampoline_size
,
2013 jjump_pad_insn_size
,
2015 adjusted_insn_addr_end
,
2019 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2020 collector
, lockaddr
,
2021 orig_size
, jump_entry
,
2022 trampoline
, trampoline_size
,
2024 jjump_pad_insn_size
,
2026 adjusted_insn_addr_end
,
2030 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
   (NOTE(review): the tail of this comment, the function's return-type
   line and all of its return statements were dropped by the
   extraction -- confirm the returned lengths against the full
   file.)  */
2034 x86_get_min_fast_tracepoint_insn_len (void)
/* Only warn once per gdbserver session about missing trampoline
   space; this persists across calls.  */
2036 static int warned_about_fast_tracepoints
= 0;
2039 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2040    used for fast tracepoints. */
2041 if (is_64bit_tdesc ())
/* The answer for 32-bit depends on the in-process agent, which only
   exists once loaded.  */
2045 if (agent_loaded_p ())
2047 char errbuf
[IPA_BUFSIZ
];
2051 /* On x86, if trampolines are available, then 4-byte jump instructions
2052    with a 2-byte offset may be used, otherwise 5-byte jump instructions
2053    with a 4-byte offset are used instead. */
2054 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2058 /* GDB has no channel to explain to user why a shorter fast
2059    tracepoint is not possible, but at least make GDBserver
2060    mention that something has gone awry. */
2061 if (!warned_about_fast_tracepoints
)
2063 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2064 warned_about_fast_tracepoints
= 1;
2071 /* Indicate that the minimum length is currently unknown since the IPA
2072    has not loaded yet. */
/* Append LEN bytes of machine code at START to the bytecode
   compilation area, advancing the global current_insn_ptr.
   NOTE(review): the debug_printf below is normally guarded by
   "if (debug_threads)", which was dropped by the extraction --
   confirm against the full file.  */
2078 add_insns (unsigned char *start
, int len
)
2080 CORE_ADDR buildaddr
= current_insn_ptr
;
2083 debug_printf ("Adding %d bytes of insn at %s\n",
2084 len
, paddress (buildaddr
));
/* append_insns advances BUILDADDR past the written bytes; publish the
   new end of the compiled code.  */
2086 append_insns (&buildaddr
, len
, start
);
2087 current_insn_ptr
= buildaddr
;
2090 /* Our general strategy for emitting code is to avoid specifying raw
2091 bytes whenever possible, and instead copy a block of inline asm
2092 that is embedded in the function. This is a little messy, because
2093 we need to keep the compiler from discarding what looks like dead
2094 code, plus suppress various warnings. */
2096 #define EMIT_ASM(NAME, INSNS) \
2099 extern unsigned char start_ ## NAME, end_ ## NAME; \
2100 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2101 __asm__ ("jmp end_" #NAME "\n" \
2102 "\t" "start_" #NAME ":" \
2104 "\t" "end_" #NAME ":"); \
2109 #define EMIT_ASM32(NAME,INSNS) \
2112 extern unsigned char start_ ## NAME, end_ ## NAME; \
2113 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2114 __asm__ (".code32\n" \
2115 "\t" "jmp end_" #NAME "\n" \
2116 "\t" "start_" #NAME ":\n" \
2118 "\t" "end_" #NAME ":\n" \
2124 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2131 amd64_emit_prologue (void)
2133 EMIT_ASM (amd64_prologue
,
2135 "movq %rsp,%rbp\n\t"
2136 "sub $0x20,%rsp\n\t"
2137 "movq %rdi,-8(%rbp)\n\t"
2138 "movq %rsi,-16(%rbp)");
2143 amd64_emit_epilogue (void)
2145 EMIT_ASM (amd64_epilogue
,
2146 "movq -16(%rbp),%rdi\n\t"
2147 "movq %rax,(%rdi)\n\t"
2154 amd64_emit_add (void)
2156 EMIT_ASM (amd64_add
,
2157 "add (%rsp),%rax\n\t"
2158 "lea 0x8(%rsp),%rsp");
2162 amd64_emit_sub (void)
2164 EMIT_ASM (amd64_sub
,
2165 "sub %rax,(%rsp)\n\t"
2170 amd64_emit_mul (void)
2176 amd64_emit_lsh (void)
2182 amd64_emit_rsh_signed (void)
2188 amd64_emit_rsh_unsigned (void)
2194 amd64_emit_ext (int arg
)
2199 EMIT_ASM (amd64_ext_8
,
2205 EMIT_ASM (amd64_ext_16
,
2210 EMIT_ASM (amd64_ext_32
,
2219 amd64_emit_log_not (void)
2221 EMIT_ASM (amd64_log_not
,
2222 "test %rax,%rax\n\t"
2228 amd64_emit_bit_and (void)
2230 EMIT_ASM (amd64_and
,
2231 "and (%rsp),%rax\n\t"
2232 "lea 0x8(%rsp),%rsp");
2236 amd64_emit_bit_or (void)
2239 "or (%rsp),%rax\n\t"
2240 "lea 0x8(%rsp),%rsp");
2244 amd64_emit_bit_xor (void)
2246 EMIT_ASM (amd64_xor
,
2247 "xor (%rsp),%rax\n\t"
2248 "lea 0x8(%rsp),%rsp");
2252 amd64_emit_bit_not (void)
2254 EMIT_ASM (amd64_bit_not
,
2255 "xorq $0xffffffffffffffff,%rax");
2259 amd64_emit_equal (void)
2261 EMIT_ASM (amd64_equal
,
2262 "cmp %rax,(%rsp)\n\t"
2263 "je .Lamd64_equal_true\n\t"
2265 "jmp .Lamd64_equal_end\n\t"
2266 ".Lamd64_equal_true:\n\t"
2268 ".Lamd64_equal_end:\n\t"
2269 "lea 0x8(%rsp),%rsp");
2273 amd64_emit_less_signed (void)
2275 EMIT_ASM (amd64_less_signed
,
2276 "cmp %rax,(%rsp)\n\t"
2277 "jl .Lamd64_less_signed_true\n\t"
2279 "jmp .Lamd64_less_signed_end\n\t"
2280 ".Lamd64_less_signed_true:\n\t"
2282 ".Lamd64_less_signed_end:\n\t"
2283 "lea 0x8(%rsp),%rsp");
2287 amd64_emit_less_unsigned (void)
2289 EMIT_ASM (amd64_less_unsigned
,
2290 "cmp %rax,(%rsp)\n\t"
2291 "jb .Lamd64_less_unsigned_true\n\t"
2293 "jmp .Lamd64_less_unsigned_end\n\t"
2294 ".Lamd64_less_unsigned_true:\n\t"
2296 ".Lamd64_less_unsigned_end:\n\t"
2297 "lea 0x8(%rsp),%rsp");
2301 amd64_emit_ref (int size
)
2306 EMIT_ASM (amd64_ref1
,
2310 EMIT_ASM (amd64_ref2
,
2314 EMIT_ASM (amd64_ref4
,
2315 "movl (%rax),%eax");
2318 EMIT_ASM (amd64_ref8
,
2319 "movq (%rax),%rax");
2325 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2327 EMIT_ASM (amd64_if_goto
,
2331 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2339 amd64_emit_goto (int *offset_p
, int *size_p
)
2341 EMIT_ASM (amd64_goto
,
2342 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2350 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2352 int diff
= (to
- (from
+ size
));
2353 unsigned char buf
[sizeof (int)];
2361 memcpy (buf
, &diff
, sizeof (int));
2362 write_inferior_memory (from
, buf
, sizeof (int));
2366 amd64_emit_const (LONGEST num
)
2368 unsigned char buf
[16];
2370 CORE_ADDR buildaddr
= current_insn_ptr
;
2373 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2374 memcpy (&buf
[i
], &num
, sizeof (num
));
2376 append_insns (&buildaddr
, i
, buf
);
2377 current_insn_ptr
= buildaddr
;
2381 amd64_emit_call (CORE_ADDR fn
)
2383 unsigned char buf
[16];
2385 CORE_ADDR buildaddr
;
2388 /* The destination function being in the shared library, may be
2389 >31-bits away off the compiled code pad. */
2391 buildaddr
= current_insn_ptr
;
2393 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2397 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2399 /* Offset is too large for a call. Use callq, but that requires
2400 a register, so avoid it if possible. Use r10, since it is
2401 call-clobbered, we don't have to push/pop it. */
2402 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2404 memcpy (buf
+ i
, &fn
, 8);
2406 buf
[i
++] = 0xff; /* callq *%r10 */
2411 int offset32
= offset64
; /* we know we can't overflow here. */
2412 memcpy (buf
+ i
, &offset32
, 4);
2416 append_insns (&buildaddr
, i
, buf
);
2417 current_insn_ptr
= buildaddr
;
2421 amd64_emit_reg (int reg
)
2423 unsigned char buf
[16];
2425 CORE_ADDR buildaddr
;
2427 /* Assume raw_regs is still in %rdi. */
2428 buildaddr
= current_insn_ptr
;
2430 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2431 memcpy (&buf
[i
], ®
, sizeof (reg
));
2433 append_insns (&buildaddr
, i
, buf
);
2434 current_insn_ptr
= buildaddr
;
2435 amd64_emit_call (get_raw_reg_func_addr ());
2439 amd64_emit_pop (void)
2441 EMIT_ASM (amd64_pop
,
2446 amd64_emit_stack_flush (void)
2448 EMIT_ASM (amd64_stack_flush
,
2453 amd64_emit_zero_ext (int arg
)
2458 EMIT_ASM (amd64_zero_ext_8
,
2462 EMIT_ASM (amd64_zero_ext_16
,
2463 "and $0xffff,%rax");
2466 EMIT_ASM (amd64_zero_ext_32
,
2467 "mov $0xffffffff,%rcx\n\t"
2476 amd64_emit_swap (void)
2478 EMIT_ASM (amd64_swap
,
2485 amd64_emit_stack_adjust (int n
)
2487 unsigned char buf
[16];
2489 CORE_ADDR buildaddr
= current_insn_ptr
;
2492 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2496 /* This only handles adjustments up to 16, but we don't expect any more. */
2498 append_insns (&buildaddr
, i
, buf
);
2499 current_insn_ptr
= buildaddr
;
2502 /* FN's prototype is `LONGEST(*fn)(int)'. */
2505 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2507 unsigned char buf
[16];
2509 CORE_ADDR buildaddr
;
2511 buildaddr
= current_insn_ptr
;
2513 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2514 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2516 append_insns (&buildaddr
, i
, buf
);
2517 current_insn_ptr
= buildaddr
;
2518 amd64_emit_call (fn
);
2521 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2524 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2526 unsigned char buf
[16];
2528 CORE_ADDR buildaddr
;
2530 buildaddr
= current_insn_ptr
;
2532 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2533 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2535 append_insns (&buildaddr
, i
, buf
);
2536 current_insn_ptr
= buildaddr
;
2537 EMIT_ASM (amd64_void_call_2_a
,
2538 /* Save away a copy of the stack top. */
2540 /* Also pass top as the second argument. */
2542 amd64_emit_call (fn
);
2543 EMIT_ASM (amd64_void_call_2_b
,
2544 /* Restore the stack top, %rax may have been trashed. */
2549 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2552 "cmp %rax,(%rsp)\n\t"
2553 "jne .Lamd64_eq_fallthru\n\t"
2554 "lea 0x8(%rsp),%rsp\n\t"
2556 /* jmp, but don't trust the assembler to choose the right jump */
2557 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2558 ".Lamd64_eq_fallthru:\n\t"
2559 "lea 0x8(%rsp),%rsp\n\t"
2569 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2572 "cmp %rax,(%rsp)\n\t"
2573 "je .Lamd64_ne_fallthru\n\t"
2574 "lea 0x8(%rsp),%rsp\n\t"
2576 /* jmp, but don't trust the assembler to choose the right jump */
2577 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2578 ".Lamd64_ne_fallthru:\n\t"
2579 "lea 0x8(%rsp),%rsp\n\t"
2589 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2592 "cmp %rax,(%rsp)\n\t"
2593 "jnl .Lamd64_lt_fallthru\n\t"
2594 "lea 0x8(%rsp),%rsp\n\t"
2596 /* jmp, but don't trust the assembler to choose the right jump */
2597 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2598 ".Lamd64_lt_fallthru:\n\t"
2599 "lea 0x8(%rsp),%rsp\n\t"
2609 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2612 "cmp %rax,(%rsp)\n\t"
2613 "jnle .Lamd64_le_fallthru\n\t"
2614 "lea 0x8(%rsp),%rsp\n\t"
2616 /* jmp, but don't trust the assembler to choose the right jump */
2617 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2618 ".Lamd64_le_fallthru:\n\t"
2619 "lea 0x8(%rsp),%rsp\n\t"
2629 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2632 "cmp %rax,(%rsp)\n\t"
2633 "jng .Lamd64_gt_fallthru\n\t"
2634 "lea 0x8(%rsp),%rsp\n\t"
2636 /* jmp, but don't trust the assembler to choose the right jump */
2637 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2638 ".Lamd64_gt_fallthru:\n\t"
2639 "lea 0x8(%rsp),%rsp\n\t"
2649 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2652 "cmp %rax,(%rsp)\n\t"
2653 "jnge .Lamd64_ge_fallthru\n\t"
2654 ".Lamd64_ge_jump:\n\t"
2655 "lea 0x8(%rsp),%rsp\n\t"
2657 /* jmp, but don't trust the assembler to choose the right jump */
2658 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2659 ".Lamd64_ge_fallthru:\n\t"
2660 "lea 0x8(%rsp),%rsp\n\t"
2669 struct emit_ops amd64_emit_ops
=
2671 amd64_emit_prologue
,
2672 amd64_emit_epilogue
,
2677 amd64_emit_rsh_signed
,
2678 amd64_emit_rsh_unsigned
,
2686 amd64_emit_less_signed
,
2687 amd64_emit_less_unsigned
,
2691 amd64_write_goto_address
,
2696 amd64_emit_stack_flush
,
2697 amd64_emit_zero_ext
,
2699 amd64_emit_stack_adjust
,
2700 amd64_emit_int_call_1
,
2701 amd64_emit_void_call_2
,
2710 #endif /* __x86_64__ */
2713 i386_emit_prologue (void)
2715 EMIT_ASM32 (i386_prologue
,
2719 /* At this point, the raw regs base address is at 8(%ebp), and the
2720 value pointer is at 12(%ebp). */
2724 i386_emit_epilogue (void)
2726 EMIT_ASM32 (i386_epilogue
,
2727 "mov 12(%ebp),%ecx\n\t"
2728 "mov %eax,(%ecx)\n\t"
2729 "mov %ebx,0x4(%ecx)\n\t"
2737 i386_emit_add (void)
2739 EMIT_ASM32 (i386_add
,
2740 "add (%esp),%eax\n\t"
2741 "adc 0x4(%esp),%ebx\n\t"
2742 "lea 0x8(%esp),%esp");
2746 i386_emit_sub (void)
2748 EMIT_ASM32 (i386_sub
,
2749 "subl %eax,(%esp)\n\t"
2750 "sbbl %ebx,4(%esp)\n\t"
2756 i386_emit_mul (void)
2762 i386_emit_lsh (void)
2768 i386_emit_rsh_signed (void)
2774 i386_emit_rsh_unsigned (void)
2780 i386_emit_ext (int arg
)
2785 EMIT_ASM32 (i386_ext_8
,
2788 "movl %eax,%ebx\n\t"
2792 EMIT_ASM32 (i386_ext_16
,
2794 "movl %eax,%ebx\n\t"
2798 EMIT_ASM32 (i386_ext_32
,
2799 "movl %eax,%ebx\n\t"
2808 i386_emit_log_not (void)
2810 EMIT_ASM32 (i386_log_not
,
2812 "test %eax,%eax\n\t"
2819 i386_emit_bit_and (void)
2821 EMIT_ASM32 (i386_and
,
2822 "and (%esp),%eax\n\t"
2823 "and 0x4(%esp),%ebx\n\t"
2824 "lea 0x8(%esp),%esp");
2828 i386_emit_bit_or (void)
2830 EMIT_ASM32 (i386_or
,
2831 "or (%esp),%eax\n\t"
2832 "or 0x4(%esp),%ebx\n\t"
2833 "lea 0x8(%esp),%esp");
2837 i386_emit_bit_xor (void)
2839 EMIT_ASM32 (i386_xor
,
2840 "xor (%esp),%eax\n\t"
2841 "xor 0x4(%esp),%ebx\n\t"
2842 "lea 0x8(%esp),%esp");
2846 i386_emit_bit_not (void)
2848 EMIT_ASM32 (i386_bit_not
,
2849 "xor $0xffffffff,%eax\n\t"
2850 "xor $0xffffffff,%ebx\n\t");
2854 i386_emit_equal (void)
2856 EMIT_ASM32 (i386_equal
,
2857 "cmpl %ebx,4(%esp)\n\t"
2858 "jne .Li386_equal_false\n\t"
2859 "cmpl %eax,(%esp)\n\t"
2860 "je .Li386_equal_true\n\t"
2861 ".Li386_equal_false:\n\t"
2863 "jmp .Li386_equal_end\n\t"
2864 ".Li386_equal_true:\n\t"
2866 ".Li386_equal_end:\n\t"
2868 "lea 0x8(%esp),%esp");
2872 i386_emit_less_signed (void)
2874 EMIT_ASM32 (i386_less_signed
,
2875 "cmpl %ebx,4(%esp)\n\t"
2876 "jl .Li386_less_signed_true\n\t"
2877 "jne .Li386_less_signed_false\n\t"
2878 "cmpl %eax,(%esp)\n\t"
2879 "jl .Li386_less_signed_true\n\t"
2880 ".Li386_less_signed_false:\n\t"
2882 "jmp .Li386_less_signed_end\n\t"
2883 ".Li386_less_signed_true:\n\t"
2885 ".Li386_less_signed_end:\n\t"
2887 "lea 0x8(%esp),%esp");
2891 i386_emit_less_unsigned (void)
2893 EMIT_ASM32 (i386_less_unsigned
,
2894 "cmpl %ebx,4(%esp)\n\t"
2895 "jb .Li386_less_unsigned_true\n\t"
2896 "jne .Li386_less_unsigned_false\n\t"
2897 "cmpl %eax,(%esp)\n\t"
2898 "jb .Li386_less_unsigned_true\n\t"
2899 ".Li386_less_unsigned_false:\n\t"
2901 "jmp .Li386_less_unsigned_end\n\t"
2902 ".Li386_less_unsigned_true:\n\t"
2904 ".Li386_less_unsigned_end:\n\t"
2906 "lea 0x8(%esp),%esp");
2910 i386_emit_ref (int size
)
2915 EMIT_ASM32 (i386_ref1
,
2919 EMIT_ASM32 (i386_ref2
,
2923 EMIT_ASM32 (i386_ref4
,
2924 "movl (%eax),%eax");
2927 EMIT_ASM32 (i386_ref8
,
2928 "movl 4(%eax),%ebx\n\t"
2929 "movl (%eax),%eax");
2935 i386_emit_if_goto (int *offset_p
, int *size_p
)
2937 EMIT_ASM32 (i386_if_goto
,
2943 /* Don't trust the assembler to choose the right jump */
2944 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2947 *offset_p
= 11; /* be sure that this matches the sequence above */
2953 i386_emit_goto (int *offset_p
, int *size_p
)
2955 EMIT_ASM32 (i386_goto
,
2956 /* Don't trust the assembler to choose the right jump */
2957 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2965 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2967 int diff
= (to
- (from
+ size
));
2968 unsigned char buf
[sizeof (int)];
2970 /* We're only doing 4-byte sizes at the moment. */
2977 memcpy (buf
, &diff
, sizeof (int));
2978 write_inferior_memory (from
, buf
, sizeof (int));
2982 i386_emit_const (LONGEST num
)
2984 unsigned char buf
[16];
2986 CORE_ADDR buildaddr
= current_insn_ptr
;
2989 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2990 lo
= num
& 0xffffffff;
2991 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2993 hi
= ((num
>> 32) & 0xffffffff);
2996 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2997 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3002 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3004 append_insns (&buildaddr
, i
, buf
);
3005 current_insn_ptr
= buildaddr
;
3009 i386_emit_call (CORE_ADDR fn
)
3011 unsigned char buf
[16];
3013 CORE_ADDR buildaddr
;
3015 buildaddr
= current_insn_ptr
;
3017 buf
[i
++] = 0xe8; /* call <reladdr> */
3018 offset
= ((int) fn
) - (buildaddr
+ 5);
3019 memcpy (buf
+ 1, &offset
, 4);
3020 append_insns (&buildaddr
, 5, buf
);
3021 current_insn_ptr
= buildaddr
;
3025 i386_emit_reg (int reg
)
3027 unsigned char buf
[16];
3029 CORE_ADDR buildaddr
;
3031 EMIT_ASM32 (i386_reg_a
,
3033 buildaddr
= current_insn_ptr
;
3035 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3036 memcpy (&buf
[i
], ®
, sizeof (reg
));
3038 append_insns (&buildaddr
, i
, buf
);
3039 current_insn_ptr
= buildaddr
;
3040 EMIT_ASM32 (i386_reg_b
,
3041 "mov %eax,4(%esp)\n\t"
3042 "mov 8(%ebp),%eax\n\t"
3044 i386_emit_call (get_raw_reg_func_addr ());
3045 EMIT_ASM32 (i386_reg_c
,
3047 "lea 0x8(%esp),%esp");
3051 i386_emit_pop (void)
3053 EMIT_ASM32 (i386_pop
,
3059 i386_emit_stack_flush (void)
3061 EMIT_ASM32 (i386_stack_flush
,
3067 i386_emit_zero_ext (int arg
)
3072 EMIT_ASM32 (i386_zero_ext_8
,
3073 "and $0xff,%eax\n\t"
3077 EMIT_ASM32 (i386_zero_ext_16
,
3078 "and $0xffff,%eax\n\t"
3082 EMIT_ASM32 (i386_zero_ext_32
,
3091 i386_emit_swap (void)
3093 EMIT_ASM32 (i386_swap
,
3103 i386_emit_stack_adjust (int n
)
3105 unsigned char buf
[16];
3107 CORE_ADDR buildaddr
= current_insn_ptr
;
3110 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3114 append_insns (&buildaddr
, i
, buf
);
3115 current_insn_ptr
= buildaddr
;
3118 /* FN's prototype is `LONGEST(*fn)(int)'. */
3121 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3123 unsigned char buf
[16];
3125 CORE_ADDR buildaddr
;
3127 EMIT_ASM32 (i386_int_call_1_a
,
3128 /* Reserve a bit of stack space. */
3130 /* Put the one argument on the stack. */
3131 buildaddr
= current_insn_ptr
;
3133 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3136 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3138 append_insns (&buildaddr
, i
, buf
);
3139 current_insn_ptr
= buildaddr
;
3140 i386_emit_call (fn
);
3141 EMIT_ASM32 (i386_int_call_1_c
,
3143 "lea 0x8(%esp),%esp");
3146 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3149 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3151 unsigned char buf
[16];
3153 CORE_ADDR buildaddr
;
3155 EMIT_ASM32 (i386_void_call_2_a
,
3156 /* Preserve %eax only; we don't have to worry about %ebx. */
3158 /* Reserve a bit of stack space for arguments. */
3159 "sub $0x10,%esp\n\t"
3160 /* Copy "top" to the second argument position. (Note that
3161 we can't assume function won't scribble on its
3162 arguments, so don't try to restore from this.) */
3163 "mov %eax,4(%esp)\n\t"
3164 "mov %ebx,8(%esp)");
3165 /* Put the first argument on the stack. */
3166 buildaddr
= current_insn_ptr
;
3168 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3171 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3173 append_insns (&buildaddr
, i
, buf
);
3174 current_insn_ptr
= buildaddr
;
3175 i386_emit_call (fn
);
3176 EMIT_ASM32 (i386_void_call_2_b
,
3177 "lea 0x10(%esp),%esp\n\t"
3178 /* Restore original stack top. */
3184 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3187 /* Check low half first, more likely to be decider */
3188 "cmpl %eax,(%esp)\n\t"
3189 "jne .Leq_fallthru\n\t"
3190 "cmpl %ebx,4(%esp)\n\t"
3191 "jne .Leq_fallthru\n\t"
3192 "lea 0x8(%esp),%esp\n\t"
3195 /* jmp, but don't trust the assembler to choose the right jump */
3196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3197 ".Leq_fallthru:\n\t"
3198 "lea 0x8(%esp),%esp\n\t"
3209 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3212 /* Check low half first, more likely to be decider */
3213 "cmpl %eax,(%esp)\n\t"
3215 "cmpl %ebx,4(%esp)\n\t"
3216 "je .Lne_fallthru\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
3221 /* jmp, but don't trust the assembler to choose the right jump */
3222 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3223 ".Lne_fallthru:\n\t"
3224 "lea 0x8(%esp),%esp\n\t"
3235 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3238 "cmpl %ebx,4(%esp)\n\t"
3240 "jne .Llt_fallthru\n\t"
3241 "cmpl %eax,(%esp)\n\t"
3242 "jnl .Llt_fallthru\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
3247 /* jmp, but don't trust the assembler to choose the right jump */
3248 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3249 ".Llt_fallthru:\n\t"
3250 "lea 0x8(%esp),%esp\n\t"
3261 i386_emit_le_goto (int *offset_p
, int *size_p
)
3264 "cmpl %ebx,4(%esp)\n\t"
3266 "jne .Lle_fallthru\n\t"
3267 "cmpl %eax,(%esp)\n\t"
3268 "jnle .Lle_fallthru\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
3273 /* jmp, but don't trust the assembler to choose the right jump */
3274 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3275 ".Lle_fallthru:\n\t"
3276 "lea 0x8(%esp),%esp\n\t"
3287 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3290 "cmpl %ebx,4(%esp)\n\t"
3292 "jne .Lgt_fallthru\n\t"
3293 "cmpl %eax,(%esp)\n\t"
3294 "jng .Lgt_fallthru\n\t"
3296 "lea 0x8(%esp),%esp\n\t"
3299 /* jmp, but don't trust the assembler to choose the right jump */
3300 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3301 ".Lgt_fallthru:\n\t"
3302 "lea 0x8(%esp),%esp\n\t"
3313 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3316 "cmpl %ebx,4(%esp)\n\t"
3318 "jne .Lge_fallthru\n\t"
3319 "cmpl %eax,(%esp)\n\t"
3320 "jnge .Lge_fallthru\n\t"
3322 "lea 0x8(%esp),%esp\n\t"
3325 /* jmp, but don't trust the assembler to choose the right jump */
3326 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3327 ".Lge_fallthru:\n\t"
3328 "lea 0x8(%esp),%esp\n\t"
3338 struct emit_ops i386_emit_ops
=
3346 i386_emit_rsh_signed
,
3347 i386_emit_rsh_unsigned
,
3355 i386_emit_less_signed
,
3356 i386_emit_less_unsigned
,
3360 i386_write_goto_address
,
3365 i386_emit_stack_flush
,
3368 i386_emit_stack_adjust
,
3369 i386_emit_int_call_1
,
3370 i386_emit_void_call_2
,
3380 static struct emit_ops
*
3384 if (is_64bit_tdesc ())
3385 return &amd64_emit_ops
;
3388 return &i386_emit_ops
;
/* The linux_target_ops "supports_range_stepping" hook: x86 can always
   do range stepping (single-step and compare PC against the range).  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3397 /* This is initialized assuming an amd64 target.
3398 x86_arch_setup will correct it for i386 or amd64 targets. */
3400 struct linux_target_ops the_low_target
=
3403 x86_linux_regs_info
,
3404 x86_cannot_fetch_register
,
3405 x86_cannot_store_register
,
3406 NULL
, /* fetch_register */
3414 x86_supports_z_point_type
,
3417 x86_stopped_by_watchpoint
,
3418 x86_stopped_data_address
,
3419 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3420 native i386 case (no registers smaller than an xfer unit), and are not
3421 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3424 /* need to fix up i386 siginfo if host is amd64 */
3426 x86_linux_new_process
,
3427 x86_linux_new_thread
,
3428 x86_linux_prepare_to_resume
,
3429 x86_linux_process_qsupported
,
3430 x86_supports_tracepoints
,
3431 x86_get_thread_area
,
3432 x86_install_fast_tracepoint_jump_pad
,
3434 x86_get_min_fast_tracepoint_insn_len
,
3435 x86_supports_range_stepping
,
3439 initialize_low_arch (void)
3441 /* Initialize the Linux target descriptions. */
3443 init_registers_amd64_linux ();
3444 init_registers_amd64_avx_linux ();
3445 init_registers_amd64_avx512_linux ();
3446 init_registers_amd64_mpx_linux ();
3448 init_registers_x32_linux ();
3449 init_registers_x32_avx_linux ();
3450 init_registers_x32_avx512_linux ();
3452 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3453 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3454 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3456 init_registers_i386_linux ();
3457 init_registers_i386_mmx_linux ();
3458 init_registers_i386_avx_linux ();
3459 init_registers_i386_avx512_linux ();
3460 init_registers_i386_mpx_linux ();
3462 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3463 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3464 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3466 initialize_regsets_info (&x86_regsets_info
);