1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
44 /* Defined in auto-generated file amd64-linux.c. */
45 void init_registers_amd64_linux (void);
46 extern const struct target_desc
*tdesc_amd64_linux
;
48 /* Defined in auto-generated file amd64-avx-linux.c. */
49 void init_registers_amd64_avx_linux (void);
50 extern const struct target_desc
*tdesc_amd64_avx_linux
;
52 /* Defined in auto-generated file amd64-avx512-linux.c. */
53 void init_registers_amd64_avx512_linux (void);
54 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
56 /* Defined in auto-generated file amd64-mpx-linux.c. */
57 void init_registers_amd64_mpx_linux (void);
58 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
60 /* Defined in auto-generated file x32-linux.c. */
61 void init_registers_x32_linux (void);
62 extern const struct target_desc
*tdesc_x32_linux
;
64 /* Defined in auto-generated file x32-avx-linux.c. */
65 void init_registers_x32_avx_linux (void);
66 extern const struct target_desc
*tdesc_x32_avx_linux
;
68 /* Defined in auto-generated file x32-avx512-linux.c. */
69 void init_registers_x32_avx512_linux (void);
70 extern const struct target_desc
*tdesc_x32_avx512_linux
;
74 /* Defined in auto-generated file i386-linux.c. */
75 void init_registers_i386_linux (void);
76 extern const struct target_desc
*tdesc_i386_linux
;
78 /* Defined in auto-generated file i386-mmx-linux.c. */
79 void init_registers_i386_mmx_linux (void);
80 extern const struct target_desc
*tdesc_i386_mmx_linux
;
82 /* Defined in auto-generated file i386-avx-linux.c. */
83 void init_registers_i386_avx_linux (void);
84 extern const struct target_desc
*tdesc_i386_avx_linux
;
86 /* Defined in auto-generated file i386-avx512-linux.c. */
87 void init_registers_i386_avx512_linux (void);
88 extern const struct target_desc
*tdesc_i386_avx512_linux
;
90 /* Defined in auto-generated file i386-mpx-linux.c. */
91 void init_registers_i386_mpx_linux (void);
92 extern const struct target_desc
*tdesc_i386_mpx_linux
;
95 static struct target_desc
*tdesc_amd64_linux_no_xml
;
97 static struct target_desc
*tdesc_i386_linux_no_xml
;
/* Byte patterns for the relative jump instructions used when
   installing fast tracepoint jump pads.  The displacement fields
   (all zero here) are patched in at install time.
   jump_insn: "jmp rel32" (0xe9 + 4-byte displacement).
   small_jump_insn: operand-size-prefixed "jmp rel16"
   (0x66 0xe9 + 2-byte displacement).  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
103 /* Backward compatibility for gdb without XML support. */
105 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
106 <architecture>i386</architecture>\
107 <osabi>GNU/Linux</osabi>\
111 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
112 <architecture>i386:x86-64</architecture>\
113 <osabi>GNU/Linux</osabi>\
118 #include <sys/procfs.h>
119 #include <sys/ptrace.h>
122 #ifndef PTRACE_GETREGSET
123 #define PTRACE_GETREGSET 0x4204
126 #ifndef PTRACE_SETREGSET
127 #define PTRACE_SETREGSET 0x4205
131 #ifndef PTRACE_GET_THREAD_AREA
132 #define PTRACE_GET_THREAD_AREA 25
135 /* This definition comes from prctl.h, but some kernels may not have it. */
136 #ifndef PTRACE_ARCH_PRCTL
137 #define PTRACE_ARCH_PRCTL 30
140 /* The following definitions come from prctl.h, but may be absent
141 for certain configurations. */
143 #define ARCH_SET_GS 0x1001
144 #define ARCH_SET_FS 0x1002
145 #define ARCH_GET_FS 0x1003
146 #define ARCH_GET_GS 0x1004
149 /* Per-process arch-specific data we want to keep. */
151 struct arch_process_info
153 struct x86_debug_reg_state debug_reg_state
;
158 /* Mapping between the general-purpose registers in `struct user'
159 format and GDB's register array layout.
160 Note that the transfer layout uses 64-bit regs. */
161 static /*const*/ int i386_regmap
[] =
163 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
164 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
165 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
166 DS
* 8, ES
* 8, FS
* 8, GS
* 8
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
171 /* So code below doesn't have to care, i386 or amd64. */
172 #define ORIG_EAX ORIG_RAX
175 static const int x86_64_regmap
[] =
177 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
178 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
179 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
180 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
181 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
182 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
187 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
190 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
191 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1
202 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
203 #define X86_64_USER_REGS (GS + 1)
205 #else /* ! __x86_64__ */
207 /* Mapping between the general-purpose registers in `struct user'
208 format and GDB's register array layout. */
209 static /*const*/ int i386_regmap
[] =
211 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
212 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
213 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
214 DS
* 4, ES
* 4, FS
* 4, GS
* 4
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225 /* Returns true if the current inferior belongs to a x86-64 process,
229 is_64bit_tdesc (void)
231 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
233 return register_size (regcache
->tdesc
, 0) == 8;
239 /* Called by libthread_db. */
242 ps_get_thread_area (const struct ps_prochandle
*ph
,
243 lwpid_t lwpid
, int idx
, void **base
)
246 int use_64bit
= is_64bit_tdesc ();
253 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
268 unsigned int desc
[4];
270 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
271 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
274 /* Ensure we properly extend the value to 64-bits for x86_64. */
275 *base
= (void *) (uintptr_t) desc
[1];
280 /* Get the thread area address. This is used to recognize which
281 thread is which when tracing with the in-process agent library. We
282 don't read anything from the address, and treat it as opaque; it's
283 the address itself that we assume is unique per-thread. */
286 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
289 int use_64bit
= is_64bit_tdesc ();
294 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
296 *addr
= (CORE_ADDR
) (uintptr_t) base
;
305 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
306 struct thread_info
*thr
= get_lwp_thread (lwp
);
307 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
308 unsigned int desc
[4];
310 const int reg_thread_area
= 3; /* bits to scale down register value. */
313 collect_register_by_name (regcache
, "gs", &gs
);
315 idx
= gs
>> reg_thread_area
;
317 if (ptrace (PTRACE_GET_THREAD_AREA
,
319 (void *) (long) idx
, (unsigned long) &desc
) < 0)
330 x86_cannot_store_register (int regno
)
333 if (is_64bit_tdesc ())
337 return regno
>= I386_NUM_REGS
;
341 x86_cannot_fetch_register (int regno
)
344 if (is_64bit_tdesc ())
348 return regno
>= I386_NUM_REGS
;
352 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
357 if (register_size (regcache
->tdesc
, 0) == 8)
359 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
360 if (x86_64_regmap
[i
] != -1)
361 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
365 /* 32-bit inferior registers need to be zero-extended.
366 Callers would read uninitialized memory otherwise. */
367 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
370 for (i
= 0; i
< I386_NUM_REGS
; i
++)
371 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
373 collect_register_by_name (regcache
, "orig_eax",
374 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
378 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
383 if (register_size (regcache
->tdesc
, 0) == 8)
385 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
386 if (x86_64_regmap
[i
] != -1)
387 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
392 for (i
= 0; i
< I386_NUM_REGS
; i
++)
393 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
395 supply_register_by_name (regcache
, "orig_eax",
396 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
400 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
403 i387_cache_to_fxsave (regcache
, buf
);
405 i387_cache_to_fsave (regcache
, buf
);
410 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
413 i387_fxsave_to_cache (regcache
, buf
);
415 i387_fsave_to_cache (regcache
, buf
);
/* Copy the FP/SSE registers from REGCACHE into the fxsave-format
   buffer BUF (PTRACE_SETFPXREGS layout).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Copy the FP/SSE registers from the fxsave-format buffer BUF
   (PTRACE_GETFPXREGS layout) into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Copy the extended state registers from REGCACHE into the
   xsave-format buffer BUF (PTRACE_SETREGSET/NT_X86_XSTATE layout).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Copy the extended state registers from the xsave-format buffer BUF
   (PTRACE_GETREGSET/NT_X86_XSTATE layout) into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
447 /* ??? The non-biarch i386 case stores all the i387 regs twice.
448 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
449 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
450 doesn't work. IWBN to avoid the duplication in the case where it
451 does work. Maybe the arch_setup routine could check whether it works
452 and update the supported regsets accordingly. */
454 static struct regset_info x86_regsets
[] =
456 #ifdef HAVE_PTRACE_GETREGS
457 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
459 x86_fill_gregset
, x86_store_gregset
},
460 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
461 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
463 # ifdef HAVE_PTRACE_GETFPXREGS
464 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
466 x86_fill_fpxregset
, x86_store_fpxregset
},
469 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
471 x86_fill_fpregset
, x86_store_fpregset
},
472 #endif /* HAVE_PTRACE_GETREGS */
473 { 0, 0, 0, -1, -1, NULL
, NULL
}
477 x86_get_pc (struct regcache
*regcache
)
479 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
484 collect_register_by_name (regcache
, "rip", &pc
);
485 return (CORE_ADDR
) pc
;
490 collect_register_by_name (regcache
, "eip", &pc
);
491 return (CORE_ADDR
) pc
;
496 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
498 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
502 unsigned long newpc
= pc
;
503 supply_register_by_name (regcache
, "rip", &newpc
);
507 unsigned int newpc
= pc
;
508 supply_register_by_name (regcache
, "eip", &newpc
);
/* The x86 software breakpoint instruction: INT3 (0xCC), one byte.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
516 x86_breakpoint_at (CORE_ADDR pc
)
520 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of REGNUM in the u_debugreg field of struct
   user, for use with PTRACE_PEEKUSER/PTRACE_POKEUSER.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
539 /* Support for debug registers. */
542 x86_linux_dr_get (ptid_t ptid
, int regnum
)
547 gdb_assert (ptid_lwp_p (ptid
));
548 tid
= ptid_get_lwp (ptid
);
551 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
553 perror_with_name (_("Couldn't read debug register"));
559 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
563 gdb_assert (ptid_lwp_p (ptid
));
564 tid
= ptid_get_lwp (ptid
);
567 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
569 perror_with_name (_("Couldn't write debug register"));
/* Callback for iterate_over_lwps.  Mark LWP's debug registers as
   needing an update, and stop the LWP if it is running so the update
   can be applied before it next resumes.  ARG is unused.  */

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  /* The actual update is done later just before resuming the lwp,
     we just mark that the registers need updating.  */
  lwp_set_debug_registers_changed (lwp, 1);

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  /* Continue the iteration.  */
  return 0;
}
587 /* Update the inferior's debug register REGNUM from STATE. */
590 x86_linux_dr_set_addr (int regnum
, CORE_ADDR addr
)
592 /* Only update the threads of this process. */
593 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
595 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
597 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
600 /* Return the inferior's debug register REGNUM. */
603 x86_linux_dr_get_addr (int regnum
)
605 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
607 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
610 /* Update the inferior's DR7 debug control register from STATE. */
613 x86_linux_dr_set_control (unsigned long control
)
615 /* Only update the threads of this process. */
616 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
618 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
621 /* Return the inferior's DR7 debug control register. */
624 x86_linux_dr_get_control (void)
626 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
629 /* Get the value of the DR6 debug status register from the inferior
630 and record it in STATE. */
633 x86_linux_dr_get_status (void)
635 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
638 /* Low-level function vector. */
639 struct x86_dr_low_type x86_dr_low
=
641 x86_linux_dr_set_control
,
642 x86_linux_dr_set_addr
,
643 x86_linux_dr_get_addr
,
644 x86_linux_dr_get_status
,
645 x86_linux_dr_get_control
,
649 /* Breakpoint/Watchpoint support. */
652 x86_supports_z_point_type (char z_type
)
658 case Z_PACKET_WRITE_WP
:
659 case Z_PACKET_ACCESS_WP
:
667 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
668 int size
, struct raw_breakpoint
*bp
)
670 struct process_info
*proc
= current_process ();
674 case raw_bkpt_type_sw
:
675 return insert_memory_breakpoint (bp
);
677 case raw_bkpt_type_hw
:
678 case raw_bkpt_type_write_wp
:
679 case raw_bkpt_type_access_wp
:
681 enum target_hw_bp_type hw_type
682 = raw_bkpt_type_to_target_hw_bp_type (type
);
683 struct x86_debug_reg_state
*state
684 = &proc
->priv
->arch_private
->debug_reg_state
;
686 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
696 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
697 int size
, struct raw_breakpoint
*bp
)
699 struct process_info
*proc
= current_process ();
703 case raw_bkpt_type_sw
:
704 return remove_memory_breakpoint (bp
);
706 case raw_bkpt_type_hw
:
707 case raw_bkpt_type_write_wp
:
708 case raw_bkpt_type_access_wp
:
710 enum target_hw_bp_type hw_type
711 = raw_bkpt_type_to_target_hw_bp_type (type
);
712 struct x86_debug_reg_state
*state
713 = &proc
->priv
->arch_private
->debug_reg_state
;
715 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
724 x86_stopped_by_watchpoint (void)
726 struct process_info
*proc
= current_process ();
727 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
731 x86_stopped_data_address (void)
733 struct process_info
*proc
= current_process ();
735 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
741 /* Called when a new process is created. */
743 static struct arch_process_info
*
744 x86_linux_new_process (void)
746 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
748 x86_low_init_dregs (&info
->debug_reg_state
);
/* Called when a new thread is detected.  Mark its debug registers as
   changed so they are synced from the mirror before it first runs.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  lwp_set_debug_registers_changed (lwp, 1);
}
761 /* See nat/x86-dregs.h. */
763 struct x86_debug_reg_state
*
764 x86_debug_reg_state (pid_t pid
)
766 struct process_info
*proc
= find_process_pid (pid
);
768 return &proc
->priv
->arch_private
->debug_reg_state
;
771 /* Called when resuming a thread.
772 If the debug regs have changed, update the thread's copies. */
775 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
777 ptid_t ptid
= ptid_of_lwp (lwp
);
778 int clear_status
= 0;
780 if (lwp_debug_registers_changed (lwp
))
782 struct x86_debug_reg_state
*state
783 = x86_debug_reg_state (ptid_get_pid (ptid
));
786 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
788 ALL_DEBUG_ADDRESS_REGISTERS (i
)
789 if (state
->dr_ref_count
[i
] > 0)
791 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
793 /* If we're setting a watchpoint, any change the inferior
794 had done itself to the debug registers needs to be
795 discarded, otherwise, x86_dr_stopped_data_address can
800 if (state
->dr_control_mirror
!= 0)
801 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
803 lwp_set_debug_registers_changed (lwp
, 0);
807 || lwp_stop_reason (lwp
) == TARGET_STOPPED_BY_WATCHPOINT
)
808 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
811 /* When GDBSERVER is built as a 64-bit application on linux, the
812 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
813 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
814 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
815 conversion in-place ourselves. */
817 /* These types below (compat_*) define a siginfo type that is layout
818 compatible with the siginfo type exported by the 32-bit userspace
823 typedef int compat_int_t
;
824 typedef unsigned int compat_uptr_t
;
826 typedef int compat_time_t
;
827 typedef int compat_timer_t
;
828 typedef int compat_clock_t
;
830 struct compat_timeval
832 compat_time_t tv_sec
;
836 typedef union compat_sigval
838 compat_int_t sival_int
;
839 compat_uptr_t sival_ptr
;
842 typedef struct compat_siginfo
850 int _pad
[((128 / sizeof (int)) - 3)];
859 /* POSIX.1b timers */
864 compat_sigval_t _sigval
;
867 /* POSIX.1b signals */
872 compat_sigval_t _sigval
;
881 compat_clock_t _utime
;
882 compat_clock_t _stime
;
885 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
900 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
901 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
903 typedef struct compat_x32_siginfo
911 int _pad
[((128 / sizeof (int)) - 3)];
920 /* POSIX.1b timers */
925 compat_sigval_t _sigval
;
928 /* POSIX.1b signals */
933 compat_sigval_t _sigval
;
942 compat_x32_clock_t _utime
;
943 compat_x32_clock_t _stime
;
946 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
959 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
961 #define cpt_si_pid _sifields._kill._pid
962 #define cpt_si_uid _sifields._kill._uid
963 #define cpt_si_timerid _sifields._timer._tid
964 #define cpt_si_overrun _sifields._timer._overrun
965 #define cpt_si_status _sifields._sigchld._status
966 #define cpt_si_utime _sifields._sigchld._utime
967 #define cpt_si_stime _sifields._sigchld._stime
968 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
969 #define cpt_si_addr _sifields._sigfault._addr
970 #define cpt_si_band _sifields._sigpoll._band
971 #define cpt_si_fd _sifields._sigpoll._fd
973 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
974 In their place is si_timer1,si_timer2. */
976 #define si_timerid si_timer1
979 #define si_overrun si_timer2
983 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
985 memset (to
, 0, sizeof (*to
));
987 to
->si_signo
= from
->si_signo
;
988 to
->si_errno
= from
->si_errno
;
989 to
->si_code
= from
->si_code
;
991 if (to
->si_code
== SI_TIMER
)
993 to
->cpt_si_timerid
= from
->si_timerid
;
994 to
->cpt_si_overrun
= from
->si_overrun
;
995 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
997 else if (to
->si_code
== SI_USER
)
999 to
->cpt_si_pid
= from
->si_pid
;
1000 to
->cpt_si_uid
= from
->si_uid
;
1002 else if (to
->si_code
< 0)
1004 to
->cpt_si_pid
= from
->si_pid
;
1005 to
->cpt_si_uid
= from
->si_uid
;
1006 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1010 switch (to
->si_signo
)
1013 to
->cpt_si_pid
= from
->si_pid
;
1014 to
->cpt_si_uid
= from
->si_uid
;
1015 to
->cpt_si_status
= from
->si_status
;
1016 to
->cpt_si_utime
= from
->si_utime
;
1017 to
->cpt_si_stime
= from
->si_stime
;
1023 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1026 to
->cpt_si_band
= from
->si_band
;
1027 to
->cpt_si_fd
= from
->si_fd
;
1030 to
->cpt_si_pid
= from
->si_pid
;
1031 to
->cpt_si_uid
= from
->si_uid
;
1032 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1039 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1041 memset (to
, 0, sizeof (*to
));
1043 to
->si_signo
= from
->si_signo
;
1044 to
->si_errno
= from
->si_errno
;
1045 to
->si_code
= from
->si_code
;
1047 if (to
->si_code
== SI_TIMER
)
1049 to
->si_timerid
= from
->cpt_si_timerid
;
1050 to
->si_overrun
= from
->cpt_si_overrun
;
1051 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1053 else if (to
->si_code
== SI_USER
)
1055 to
->si_pid
= from
->cpt_si_pid
;
1056 to
->si_uid
= from
->cpt_si_uid
;
1058 else if (to
->si_code
< 0)
1060 to
->si_pid
= from
->cpt_si_pid
;
1061 to
->si_uid
= from
->cpt_si_uid
;
1062 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1066 switch (to
->si_signo
)
1069 to
->si_pid
= from
->cpt_si_pid
;
1070 to
->si_uid
= from
->cpt_si_uid
;
1071 to
->si_status
= from
->cpt_si_status
;
1072 to
->si_utime
= from
->cpt_si_utime
;
1073 to
->si_stime
= from
->cpt_si_stime
;
1079 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1082 to
->si_band
= from
->cpt_si_band
;
1083 to
->si_fd
= from
->cpt_si_fd
;
1086 to
->si_pid
= from
->cpt_si_pid
;
1087 to
->si_uid
= from
->cpt_si_uid
;
1088 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1095 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1098 memset (to
, 0, sizeof (*to
));
1100 to
->si_signo
= from
->si_signo
;
1101 to
->si_errno
= from
->si_errno
;
1102 to
->si_code
= from
->si_code
;
1104 if (to
->si_code
== SI_TIMER
)
1106 to
->cpt_si_timerid
= from
->si_timerid
;
1107 to
->cpt_si_overrun
= from
->si_overrun
;
1108 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1110 else if (to
->si_code
== SI_USER
)
1112 to
->cpt_si_pid
= from
->si_pid
;
1113 to
->cpt_si_uid
= from
->si_uid
;
1115 else if (to
->si_code
< 0)
1117 to
->cpt_si_pid
= from
->si_pid
;
1118 to
->cpt_si_uid
= from
->si_uid
;
1119 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1123 switch (to
->si_signo
)
1126 to
->cpt_si_pid
= from
->si_pid
;
1127 to
->cpt_si_uid
= from
->si_uid
;
1128 to
->cpt_si_status
= from
->si_status
;
1129 to
->cpt_si_utime
= from
->si_utime
;
1130 to
->cpt_si_stime
= from
->si_stime
;
1136 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1139 to
->cpt_si_band
= from
->si_band
;
1140 to
->cpt_si_fd
= from
->si_fd
;
1143 to
->cpt_si_pid
= from
->si_pid
;
1144 to
->cpt_si_uid
= from
->si_uid
;
1145 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1152 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1153 compat_x32_siginfo_t
*from
)
1155 memset (to
, 0, sizeof (*to
));
1157 to
->si_signo
= from
->si_signo
;
1158 to
->si_errno
= from
->si_errno
;
1159 to
->si_code
= from
->si_code
;
1161 if (to
->si_code
== SI_TIMER
)
1163 to
->si_timerid
= from
->cpt_si_timerid
;
1164 to
->si_overrun
= from
->cpt_si_overrun
;
1165 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1167 else if (to
->si_code
== SI_USER
)
1169 to
->si_pid
= from
->cpt_si_pid
;
1170 to
->si_uid
= from
->cpt_si_uid
;
1172 else if (to
->si_code
< 0)
1174 to
->si_pid
= from
->cpt_si_pid
;
1175 to
->si_uid
= from
->cpt_si_uid
;
1176 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1180 switch (to
->si_signo
)
1183 to
->si_pid
= from
->cpt_si_pid
;
1184 to
->si_uid
= from
->cpt_si_uid
;
1185 to
->si_status
= from
->cpt_si_status
;
1186 to
->si_utime
= from
->cpt_si_utime
;
1187 to
->si_stime
= from
->cpt_si_stime
;
1193 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1196 to
->si_band
= from
->cpt_si_band
;
1197 to
->si_fd
= from
->cpt_si_fd
;
1200 to
->si_pid
= from
->cpt_si_pid
;
1201 to
->si_uid
= from
->cpt_si_uid
;
1202 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1208 #endif /* __x86_64__ */
1210 /* Convert a native/host siginfo object, into/from the siginfo in the
1211 layout of the inferiors' architecture. Returns true if any
1212 conversion was done; false otherwise. If DIRECTION is 1, then copy
1213 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1217 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1220 unsigned int machine
;
1221 int tid
= lwpid_of (current_thread
);
1222 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1224 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1225 if (!is_64bit_tdesc ())
1227 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1230 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1232 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1236 /* No fixup for native x32 GDB. */
1237 else if (!is_elf64
&& sizeof (void *) == 8)
1239 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1242 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1245 siginfo_from_compat_x32_siginfo (native
,
1246 (struct compat_x32_siginfo
*) inf
);
1257 /* Format of XSAVE extended state is:
1260 fxsave_bytes[0..463]
1261 sw_usable_bytes[464..511]
1262 xstate_hdr_bytes[512..575]
1267 Same memory layout will be used for the coredump NT_X86_XSTATE
1268 representing the XSAVE extended state registers.
1270 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1271 extended state mask, which is the same as the extended control register
1272 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1273 together with the mask saved in the xstate_hdr_bytes to determine what
1274 states the processor/OS supports and what state, used or initialized,
1275 the process/thread is in. */
1276 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1278 /* Does the current host support the GETFPXREGS request? The header
1279 file may or may not define it, and even if it is defined, the
1280 kernel will return EIO if it's running on a pre-SSE processor. */
1281 int have_ptrace_getfpxregs
=
1282 #ifdef HAVE_PTRACE_GETFPXREGS
1289 /* Does the current host support PTRACE_GETREGSET? */
1290 static int have_ptrace_getregset
= -1;
1292 /* Get Linux/x86 target description from running target. */
1294 static const struct target_desc
*
1295 x86_linux_read_description (void)
1297 unsigned int machine
;
1301 static uint64_t xcr0
;
1302 struct regset_info
*regset
;
1304 tid
= lwpid_of (current_thread
);
1306 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1308 if (sizeof (void *) == 4)
1311 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1313 else if (machine
== EM_X86_64
)
1314 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1318 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1319 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1321 elf_fpxregset_t fpxregs
;
1323 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1325 have_ptrace_getfpxregs
= 0;
1326 have_ptrace_getregset
= 0;
1327 return tdesc_i386_mmx_linux
;
1330 have_ptrace_getfpxregs
= 1;
1336 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1338 /* Don't use XML. */
1340 if (machine
== EM_X86_64
)
1341 return tdesc_amd64_linux_no_xml
;
1344 return tdesc_i386_linux_no_xml
;
1347 if (have_ptrace_getregset
== -1)
1349 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1352 iov
.iov_base
= xstateregs
;
1353 iov
.iov_len
= sizeof (xstateregs
);
1355 /* Check if PTRACE_GETREGSET works. */
1356 if (ptrace (PTRACE_GETREGSET
, tid
,
1357 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1358 have_ptrace_getregset
= 0;
1361 have_ptrace_getregset
= 1;
1363 /* Get XCR0 from XSAVE extended state. */
1364 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1365 / sizeof (uint64_t))];
1367 /* Use PTRACE_GETREGSET if it is available. */
1368 for (regset
= x86_regsets
;
1369 regset
->fill_function
!= NULL
; regset
++)
1370 if (regset
->get_request
== PTRACE_GETREGSET
)
1371 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1372 else if (regset
->type
!= GENERAL_REGS
)
1377 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1378 xcr0_features
= (have_ptrace_getregset
1379 && (xcr0
& X86_XSTATE_ALL_MASK
));
1384 if (machine
== EM_X86_64
)
1391 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1393 case X86_XSTATE_AVX512_MASK
:
1394 return tdesc_amd64_avx512_linux
;
1396 case X86_XSTATE_MPX_MASK
:
1397 return tdesc_amd64_mpx_linux
;
1399 case X86_XSTATE_AVX_MASK
:
1400 return tdesc_amd64_avx_linux
;
1403 return tdesc_amd64_linux
;
1407 return tdesc_amd64_linux
;
1413 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1415 case X86_XSTATE_AVX512_MASK
:
1416 return tdesc_x32_avx512_linux
;
1418 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1419 case X86_XSTATE_AVX_MASK
:
1420 return tdesc_x32_avx_linux
;
1423 return tdesc_x32_linux
;
1427 return tdesc_x32_linux
;
1435 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1437 case (X86_XSTATE_AVX512_MASK
):
1438 return tdesc_i386_avx512_linux
;
1440 case (X86_XSTATE_MPX_MASK
):
1441 return tdesc_i386_mpx_linux
;
1443 case (X86_XSTATE_AVX_MASK
):
1444 return tdesc_i386_avx_linux
;
1447 return tdesc_i386_linux
;
1451 return tdesc_i386_linux
;
1454 gdb_assert_not_reached ("failed to return tdesc");
1457 /* Callback for find_inferior. Stops iteration when a thread with a
1458 given PID is found. */
1461 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1463 int pid
= *(int *) data
;
1465 return (ptid_get_pid (entry
->id
) == pid
);
1468 /* Callback for for_each_inferior. Calls the arch_setup routine for
1472 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1474 int pid
= ptid_get_pid (entry
->id
);
1476 /* Look up any thread of this processes. */
1478 = (struct thread_info
*) find_inferior (&all_threads
,
1479 same_process_callback
, &pid
);
1481 the_low_target
.arch_setup ();
1484 /* Update all the target description of all processes; a new GDB
1485 connected, and it may or not support xml target descriptions. */
1488 x86_linux_update_xmltarget (void)
1490 struct thread_info
*saved_thread
= current_thread
;
1492 /* Before changing the register cache's internal layout, flush the
1493 contents of the current valid caches back to the threads, and
1494 release the current regcache objects. */
1495 regcache_release ();
1497 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1499 current_thread
= saved_thread
;
1502 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1503 PTRACE_GETREGSET. */
1506 x86_linux_process_qsupported (const char *query
)
1508 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1509 with "i386" in qSupported query, it supports x86 XML target
1512 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1514 char *copy
= xstrdup (query
+ 13);
1517 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1519 if (strcmp (p
, "i386") == 0)
1529 x86_linux_update_xmltarget ();
1532 /* Common for x86/x86-64. */
1534 static struct regsets_info x86_regsets_info
=
1536 x86_regsets
, /* regsets */
1537 0, /* num_regsets */
1538 NULL
, /* disabled_regsets */
1542 static struct regs_info amd64_linux_regs_info
=
1544 NULL
, /* regset_bitmap */
1545 NULL
, /* usrregs_info */
1549 static struct usrregs_info i386_linux_usrregs_info
=
1555 static struct regs_info i386_linux_regs_info
=
1557 NULL
, /* regset_bitmap */
1558 &i386_linux_usrregs_info
,
1562 const struct regs_info
*
1563 x86_linux_regs_info (void)
1566 if (is_64bit_tdesc ())
1567 return &amd64_linux_regs_info
;
1570 return &i386_linux_regs_info
;
1573 /* Initialize the target description for the architecture of the
1577 x86_arch_setup (void)
1579 current_process ()->tdesc
= x86_linux_read_description ();
/* This target supports tracepoints unconditionally.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1589 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1591 write_inferior_memory (*to
, buf
, len
);
/* Decode an ASCII string of space-separated hexadecimal byte values,
   e.g. "48 83 ec 18", into raw bytes stored at BUF.  Returns the
   number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* strtoul consumed nothing: no more hex digits to decode.  */
      if (end == op)
	break;

      *dst++ = byte;
      op = end;
    }

  return dst - buf;
}
1617 /* Build a jump pad that saves registers and calls a collection
1618 function. Writes a jump instruction to the jump pad to
1619 JJUMPAD_INSN. The caller is responsible to write it in at the
1620 tracepoint address. */
1623 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1624 CORE_ADDR collector
,
1627 CORE_ADDR
*jump_entry
,
1628 CORE_ADDR
*trampoline
,
1629 ULONGEST
*trampoline_size
,
1630 unsigned char *jjump_pad_insn
,
1631 ULONGEST
*jjump_pad_insn_size
,
1632 CORE_ADDR
*adjusted_insn_addr
,
1633 CORE_ADDR
*adjusted_insn_addr_end
,
1636 unsigned char buf
[40];
1640 CORE_ADDR buildaddr
= *jump_entry
;
1642 /* Build the jump pad. */
1644 /* First, do tracepoint data collection. Save registers. */
1646 /* Need to ensure stack pointer saved first. */
1647 buf
[i
++] = 0x54; /* push %rsp */
1648 buf
[i
++] = 0x55; /* push %rbp */
1649 buf
[i
++] = 0x57; /* push %rdi */
1650 buf
[i
++] = 0x56; /* push %rsi */
1651 buf
[i
++] = 0x52; /* push %rdx */
1652 buf
[i
++] = 0x51; /* push %rcx */
1653 buf
[i
++] = 0x53; /* push %rbx */
1654 buf
[i
++] = 0x50; /* push %rax */
1655 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1656 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1657 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1658 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1659 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1660 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1661 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1662 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1663 buf
[i
++] = 0x9c; /* pushfq */
1664 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1666 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1667 i
+= sizeof (unsigned long);
1668 buf
[i
++] = 0x57; /* push %rdi */
1669 append_insns (&buildaddr
, i
, buf
);
1671 /* Stack space for the collecting_t object. */
1673 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1674 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1675 memcpy (buf
+ i
, &tpoint
, 8);
1677 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1678 i
+= push_opcode (&buf
[i
],
1679 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1680 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1681 append_insns (&buildaddr
, i
, buf
);
1685 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1686 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1688 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1689 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1690 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1691 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1692 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1693 append_insns (&buildaddr
, i
, buf
);
1695 /* Set up the gdb_collect call. */
1696 /* At this point, (stack pointer + 0x18) is the base of our saved
1700 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1701 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1703 /* tpoint address may be 64-bit wide. */
1704 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1705 memcpy (buf
+ i
, &tpoint
, 8);
1707 append_insns (&buildaddr
, i
, buf
);
1709 /* The collector function being in the shared library, may be
1710 >31-bits away off the jump pad. */
1712 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1713 memcpy (buf
+ i
, &collector
, 8);
1715 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1716 append_insns (&buildaddr
, i
, buf
);
1718 /* Clear the spin-lock. */
1720 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1721 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1722 memcpy (buf
+ i
, &lockaddr
, 8);
1724 append_insns (&buildaddr
, i
, buf
);
1726 /* Remove stack that had been used for the collect_t object. */
1728 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1729 append_insns (&buildaddr
, i
, buf
);
1731 /* Restore register state. */
1733 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1737 buf
[i
++] = 0x9d; /* popfq */
1738 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1739 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1740 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1741 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1742 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1743 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1744 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1745 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1746 buf
[i
++] = 0x58; /* pop %rax */
1747 buf
[i
++] = 0x5b; /* pop %rbx */
1748 buf
[i
++] = 0x59; /* pop %rcx */
1749 buf
[i
++] = 0x5a; /* pop %rdx */
1750 buf
[i
++] = 0x5e; /* pop %rsi */
1751 buf
[i
++] = 0x5f; /* pop %rdi */
1752 buf
[i
++] = 0x5d; /* pop %rbp */
1753 buf
[i
++] = 0x5c; /* pop %rsp */
1754 append_insns (&buildaddr
, i
, buf
);
1756 /* Now, adjust the original instruction to execute in the jump
1758 *adjusted_insn_addr
= buildaddr
;
1759 relocate_instruction (&buildaddr
, tpaddr
);
1760 *adjusted_insn_addr_end
= buildaddr
;
1762 /* Finally, write a jump back to the program. */
1764 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1765 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1768 "E.Jump back from jump pad too far from tracepoint "
1769 "(offset 0x%" PRIx64
" > int32).", loffset
);
1773 offset
= (int) loffset
;
1774 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1775 memcpy (buf
+ 1, &offset
, 4);
1776 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1778 /* The jump pad is now built. Wire in a jump to our jump pad. This
1779 is always done last (by our caller actually), so that we can
1780 install fast tracepoints with threads running. This relies on
1781 the agent's atomic write support. */
1782 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1783 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1786 "E.Jump pad too far from tracepoint "
1787 "(offset 0x%" PRIx64
" > int32).", loffset
);
1791 offset
= (int) loffset
;
1793 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1794 memcpy (buf
+ 1, &offset
, 4);
1795 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1796 *jjump_pad_insn_size
= sizeof (jump_insn
);
1798 /* Return the end address of our pad. */
1799 *jump_entry
= buildaddr
;
1804 #endif /* __x86_64__ */
1806 /* Build a jump pad that saves registers and calls a collection
1807 function. Writes a jump instruction to the jump pad to
1808 JJUMPAD_INSN. The caller is responsible to write it in at the
1809 tracepoint address. */
1812 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1813 CORE_ADDR collector
,
1816 CORE_ADDR
*jump_entry
,
1817 CORE_ADDR
*trampoline
,
1818 ULONGEST
*trampoline_size
,
1819 unsigned char *jjump_pad_insn
,
1820 ULONGEST
*jjump_pad_insn_size
,
1821 CORE_ADDR
*adjusted_insn_addr
,
1822 CORE_ADDR
*adjusted_insn_addr_end
,
1825 unsigned char buf
[0x100];
1827 CORE_ADDR buildaddr
= *jump_entry
;
1829 /* Build the jump pad. */
1831 /* First, do tracepoint data collection. Save registers. */
1833 buf
[i
++] = 0x60; /* pushad */
1834 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1835 *((int *)(buf
+ i
)) = (int) tpaddr
;
1837 buf
[i
++] = 0x9c; /* pushf */
1838 buf
[i
++] = 0x1e; /* push %ds */
1839 buf
[i
++] = 0x06; /* push %es */
1840 buf
[i
++] = 0x0f; /* push %fs */
1842 buf
[i
++] = 0x0f; /* push %gs */
1844 buf
[i
++] = 0x16; /* push %ss */
1845 buf
[i
++] = 0x0e; /* push %cs */
1846 append_insns (&buildaddr
, i
, buf
);
1848 /* Stack space for the collecting_t object. */
1850 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1852 /* Build the object. */
1853 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1854 memcpy (buf
+ i
, &tpoint
, 4);
1856 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1858 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1859 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1860 append_insns (&buildaddr
, i
, buf
);
1862 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1863 If we cared for it, this could be using xchg alternatively. */
1866 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1867 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1869 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1871 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1872 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1873 append_insns (&buildaddr
, i
, buf
);
1876 /* Set up arguments to the gdb_collect call. */
1878 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1879 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1880 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1881 append_insns (&buildaddr
, i
, buf
);
1884 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1885 append_insns (&buildaddr
, i
, buf
);
1888 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1889 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1891 append_insns (&buildaddr
, i
, buf
);
1893 buf
[0] = 0xe8; /* call <reladdr> */
1894 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1895 memcpy (buf
+ 1, &offset
, 4);
1896 append_insns (&buildaddr
, 5, buf
);
1897 /* Clean up after the call. */
1898 buf
[0] = 0x83; /* add $0x8,%esp */
1901 append_insns (&buildaddr
, 3, buf
);
1904 /* Clear the spin-lock. This would need the LOCK prefix on older
1907 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1908 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1909 memcpy (buf
+ i
, &lockaddr
, 4);
1911 append_insns (&buildaddr
, i
, buf
);
1914 /* Remove stack that had been used for the collect_t object. */
1916 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1917 append_insns (&buildaddr
, i
, buf
);
1920 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1923 buf
[i
++] = 0x17; /* pop %ss */
1924 buf
[i
++] = 0x0f; /* pop %gs */
1926 buf
[i
++] = 0x0f; /* pop %fs */
1928 buf
[i
++] = 0x07; /* pop %es */
1929 buf
[i
++] = 0x1f; /* pop %ds */
1930 buf
[i
++] = 0x9d; /* popf */
1931 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1934 buf
[i
++] = 0x61; /* popad */
1935 append_insns (&buildaddr
, i
, buf
);
1937 /* Now, adjust the original instruction to execute in the jump
1939 *adjusted_insn_addr
= buildaddr
;
1940 relocate_instruction (&buildaddr
, tpaddr
);
1941 *adjusted_insn_addr_end
= buildaddr
;
1943 /* Write the jump back to the program. */
1944 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1945 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1946 memcpy (buf
+ 1, &offset
, 4);
1947 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1949 /* The jump pad is now built. Wire in a jump to our jump pad. This
1950 is always done last (by our caller actually), so that we can
1951 install fast tracepoints with threads running. This relies on
1952 the agent's atomic write support. */
1955 /* Create a trampoline. */
1956 *trampoline_size
= sizeof (jump_insn
);
1957 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1959 /* No trampoline space available. */
1961 "E.Cannot allocate trampoline space needed for fast "
1962 "tracepoints on 4-byte instructions.");
1966 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1967 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1968 memcpy (buf
+ 1, &offset
, 4);
1969 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1971 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1972 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1973 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1974 memcpy (buf
+ 2, &offset
, 2);
1975 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1976 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1980 /* Else use a 32-bit relative jump instruction. */
1981 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1982 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1983 memcpy (buf
+ 1, &offset
, 4);
1984 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1985 *jjump_pad_insn_size
= sizeof (jump_insn
);
1988 /* Return the end address of our pad. */
1989 *jump_entry
= buildaddr
;
1995 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1996 CORE_ADDR collector
,
1999 CORE_ADDR
*jump_entry
,
2000 CORE_ADDR
*trampoline
,
2001 ULONGEST
*trampoline_size
,
2002 unsigned char *jjump_pad_insn
,
2003 ULONGEST
*jjump_pad_insn_size
,
2004 CORE_ADDR
*adjusted_insn_addr
,
2005 CORE_ADDR
*adjusted_insn_addr_end
,
2009 if (is_64bit_tdesc ())
2010 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2011 collector
, lockaddr
,
2012 orig_size
, jump_entry
,
2013 trampoline
, trampoline_size
,
2015 jjump_pad_insn_size
,
2017 adjusted_insn_addr_end
,
2021 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2022 collector
, lockaddr
,
2023 orig_size
, jump_entry
,
2024 trampoline
, trampoline_size
,
2026 jjump_pad_insn_size
,
2028 adjusted_insn_addr_end
,
2032 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2036 x86_get_min_fast_tracepoint_insn_len (void)
2038 static int warned_about_fast_tracepoints
= 0;
2041 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2042 used for fast tracepoints. */
2043 if (is_64bit_tdesc ())
2047 if (agent_loaded_p ())
2049 char errbuf
[IPA_BUFSIZ
];
2053 /* On x86, if trampolines are available, then 4-byte jump instructions
2054 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2055 with a 4-byte offset are used instead. */
2056 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2060 /* GDB has no channel to explain to user why a shorter fast
2061 tracepoint is not possible, but at least make GDBserver
2062 mention that something has gone awry. */
2063 if (!warned_about_fast_tracepoints
)
2065 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2066 warned_about_fast_tracepoints
= 1;
2073 /* Indicate that the minimum length is currently unknown since the IPA
2074 has not loaded yet. */
2080 add_insns (unsigned char *start
, int len
)
2082 CORE_ADDR buildaddr
= current_insn_ptr
;
2085 debug_printf ("Adding %d bytes of insn at %s\n",
2086 len
, paddress (buildaddr
));
2088 append_insns (&buildaddr
, len
, start
);
2089 current_insn_ptr
= buildaddr
;
2092 /* Our general strategy for emitting code is to avoid specifying raw
2093 bytes whenever possible, and instead copy a block of inline asm
2094 that is embedded in the function. This is a little messy, because
2095 we need to keep the compiler from discarding what looks like dead
2096 code, plus suppress various warnings. */
2098 #define EMIT_ASM(NAME, INSNS) \
2101 extern unsigned char start_ ## NAME, end_ ## NAME; \
2102 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2103 __asm__ ("jmp end_" #NAME "\n" \
2104 "\t" "start_" #NAME ":" \
2106 "\t" "end_" #NAME ":"); \
2111 #define EMIT_ASM32(NAME,INSNS) \
2114 extern unsigned char start_ ## NAME, end_ ## NAME; \
2115 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2116 __asm__ (".code32\n" \
2117 "\t" "jmp end_" #NAME "\n" \
2118 "\t" "start_" #NAME ":\n" \
2120 "\t" "end_" #NAME ":\n" \
2126 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2133 amd64_emit_prologue (void)
2135 EMIT_ASM (amd64_prologue
,
2137 "movq %rsp,%rbp\n\t"
2138 "sub $0x20,%rsp\n\t"
2139 "movq %rdi,-8(%rbp)\n\t"
2140 "movq %rsi,-16(%rbp)");
2145 amd64_emit_epilogue (void)
2147 EMIT_ASM (amd64_epilogue
,
2148 "movq -16(%rbp),%rdi\n\t"
2149 "movq %rax,(%rdi)\n\t"
2156 amd64_emit_add (void)
2158 EMIT_ASM (amd64_add
,
2159 "add (%rsp),%rax\n\t"
2160 "lea 0x8(%rsp),%rsp");
2164 amd64_emit_sub (void)
2166 EMIT_ASM (amd64_sub
,
2167 "sub %rax,(%rsp)\n\t"
2172 amd64_emit_mul (void)
2178 amd64_emit_lsh (void)
2184 amd64_emit_rsh_signed (void)
2190 amd64_emit_rsh_unsigned (void)
2196 amd64_emit_ext (int arg
)
2201 EMIT_ASM (amd64_ext_8
,
2207 EMIT_ASM (amd64_ext_16
,
2212 EMIT_ASM (amd64_ext_32
,
2221 amd64_emit_log_not (void)
2223 EMIT_ASM (amd64_log_not
,
2224 "test %rax,%rax\n\t"
2230 amd64_emit_bit_and (void)
2232 EMIT_ASM (amd64_and
,
2233 "and (%rsp),%rax\n\t"
2234 "lea 0x8(%rsp),%rsp");
2238 amd64_emit_bit_or (void)
2241 "or (%rsp),%rax\n\t"
2242 "lea 0x8(%rsp),%rsp");
2246 amd64_emit_bit_xor (void)
2248 EMIT_ASM (amd64_xor
,
2249 "xor (%rsp),%rax\n\t"
2250 "lea 0x8(%rsp),%rsp");
2254 amd64_emit_bit_not (void)
2256 EMIT_ASM (amd64_bit_not
,
2257 "xorq $0xffffffffffffffff,%rax");
2261 amd64_emit_equal (void)
2263 EMIT_ASM (amd64_equal
,
2264 "cmp %rax,(%rsp)\n\t"
2265 "je .Lamd64_equal_true\n\t"
2267 "jmp .Lamd64_equal_end\n\t"
2268 ".Lamd64_equal_true:\n\t"
2270 ".Lamd64_equal_end:\n\t"
2271 "lea 0x8(%rsp),%rsp");
2275 amd64_emit_less_signed (void)
2277 EMIT_ASM (amd64_less_signed
,
2278 "cmp %rax,(%rsp)\n\t"
2279 "jl .Lamd64_less_signed_true\n\t"
2281 "jmp .Lamd64_less_signed_end\n\t"
2282 ".Lamd64_less_signed_true:\n\t"
2284 ".Lamd64_less_signed_end:\n\t"
2285 "lea 0x8(%rsp),%rsp");
2289 amd64_emit_less_unsigned (void)
2291 EMIT_ASM (amd64_less_unsigned
,
2292 "cmp %rax,(%rsp)\n\t"
2293 "jb .Lamd64_less_unsigned_true\n\t"
2295 "jmp .Lamd64_less_unsigned_end\n\t"
2296 ".Lamd64_less_unsigned_true:\n\t"
2298 ".Lamd64_less_unsigned_end:\n\t"
2299 "lea 0x8(%rsp),%rsp");
2303 amd64_emit_ref (int size
)
2308 EMIT_ASM (amd64_ref1
,
2312 EMIT_ASM (amd64_ref2
,
2316 EMIT_ASM (amd64_ref4
,
2317 "movl (%rax),%eax");
2320 EMIT_ASM (amd64_ref8
,
2321 "movq (%rax),%rax");
2327 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2329 EMIT_ASM (amd64_if_goto
,
2333 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2341 amd64_emit_goto (int *offset_p
, int *size_p
)
2343 EMIT_ASM (amd64_goto
,
2344 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2352 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2354 int diff
= (to
- (from
+ size
));
2355 unsigned char buf
[sizeof (int)];
2363 memcpy (buf
, &diff
, sizeof (int));
2364 write_inferior_memory (from
, buf
, sizeof (int));
2368 amd64_emit_const (LONGEST num
)
2370 unsigned char buf
[16];
2372 CORE_ADDR buildaddr
= current_insn_ptr
;
2375 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2376 memcpy (&buf
[i
], &num
, sizeof (num
));
2378 append_insns (&buildaddr
, i
, buf
);
2379 current_insn_ptr
= buildaddr
;
2383 amd64_emit_call (CORE_ADDR fn
)
2385 unsigned char buf
[16];
2387 CORE_ADDR buildaddr
;
2390 /* The destination function being in the shared library, may be
2391 >31-bits away off the compiled code pad. */
2393 buildaddr
= current_insn_ptr
;
2395 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2399 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2401 /* Offset is too large for a call. Use callq, but that requires
2402 a register, so avoid it if possible. Use r10, since it is
2403 call-clobbered, we don't have to push/pop it. */
2404 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2406 memcpy (buf
+ i
, &fn
, 8);
2408 buf
[i
++] = 0xff; /* callq *%r10 */
2413 int offset32
= offset64
; /* we know we can't overflow here. */
2414 memcpy (buf
+ i
, &offset32
, 4);
2418 append_insns (&buildaddr
, i
, buf
);
2419 current_insn_ptr
= buildaddr
;
2423 amd64_emit_reg (int reg
)
2425 unsigned char buf
[16];
2427 CORE_ADDR buildaddr
;
2429 /* Assume raw_regs is still in %rdi. */
2430 buildaddr
= current_insn_ptr
;
2432 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2433 memcpy (&buf
[i
], ®
, sizeof (reg
));
2435 append_insns (&buildaddr
, i
, buf
);
2436 current_insn_ptr
= buildaddr
;
2437 amd64_emit_call (get_raw_reg_func_addr ());
2441 amd64_emit_pop (void)
2443 EMIT_ASM (amd64_pop
,
2448 amd64_emit_stack_flush (void)
2450 EMIT_ASM (amd64_stack_flush
,
2455 amd64_emit_zero_ext (int arg
)
2460 EMIT_ASM (amd64_zero_ext_8
,
2464 EMIT_ASM (amd64_zero_ext_16
,
2465 "and $0xffff,%rax");
2468 EMIT_ASM (amd64_zero_ext_32
,
2469 "mov $0xffffffff,%rcx\n\t"
2478 amd64_emit_swap (void)
2480 EMIT_ASM (amd64_swap
,
2487 amd64_emit_stack_adjust (int n
)
2489 unsigned char buf
[16];
2491 CORE_ADDR buildaddr
= current_insn_ptr
;
2494 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2498 /* This only handles adjustments up to 16, but we don't expect any more. */
2500 append_insns (&buildaddr
, i
, buf
);
2501 current_insn_ptr
= buildaddr
;
2504 /* FN's prototype is `LONGEST(*fn)(int)'. */
2507 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2509 unsigned char buf
[16];
2511 CORE_ADDR buildaddr
;
2513 buildaddr
= current_insn_ptr
;
2515 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2516 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2518 append_insns (&buildaddr
, i
, buf
);
2519 current_insn_ptr
= buildaddr
;
2520 amd64_emit_call (fn
);
2523 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2526 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2528 unsigned char buf
[16];
2530 CORE_ADDR buildaddr
;
2532 buildaddr
= current_insn_ptr
;
2534 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2535 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2537 append_insns (&buildaddr
, i
, buf
);
2538 current_insn_ptr
= buildaddr
;
2539 EMIT_ASM (amd64_void_call_2_a
,
2540 /* Save away a copy of the stack top. */
2542 /* Also pass top as the second argument. */
2544 amd64_emit_call (fn
);
2545 EMIT_ASM (amd64_void_call_2_b
,
2546 /* Restore the stack top, %rax may have been trashed. */
2551 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2554 "cmp %rax,(%rsp)\n\t"
2555 "jne .Lamd64_eq_fallthru\n\t"
2556 "lea 0x8(%rsp),%rsp\n\t"
2558 /* jmp, but don't trust the assembler to choose the right jump */
2559 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2560 ".Lamd64_eq_fallthru:\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2571 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2574 "cmp %rax,(%rsp)\n\t"
2575 "je .Lamd64_ne_fallthru\n\t"
2576 "lea 0x8(%rsp),%rsp\n\t"
2578 /* jmp, but don't trust the assembler to choose the right jump */
2579 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2580 ".Lamd64_ne_fallthru:\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2591 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2594 "cmp %rax,(%rsp)\n\t"
2595 "jnl .Lamd64_lt_fallthru\n\t"
2596 "lea 0x8(%rsp),%rsp\n\t"
2598 /* jmp, but don't trust the assembler to choose the right jump */
2599 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2600 ".Lamd64_lt_fallthru:\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2611 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2614 "cmp %rax,(%rsp)\n\t"
2615 "jnle .Lamd64_le_fallthru\n\t"
2616 "lea 0x8(%rsp),%rsp\n\t"
2618 /* jmp, but don't trust the assembler to choose the right jump */
2619 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2620 ".Lamd64_le_fallthru:\n\t"
2621 "lea 0x8(%rsp),%rsp\n\t"
2631 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2634 "cmp %rax,(%rsp)\n\t"
2635 "jng .Lamd64_gt_fallthru\n\t"
2636 "lea 0x8(%rsp),%rsp\n\t"
2638 /* jmp, but don't trust the assembler to choose the right jump */
2639 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2640 ".Lamd64_gt_fallthru:\n\t"
2641 "lea 0x8(%rsp),%rsp\n\t"
2651 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2654 "cmp %rax,(%rsp)\n\t"
2655 "jnge .Lamd64_ge_fallthru\n\t"
2656 ".Lamd64_ge_jump:\n\t"
2657 "lea 0x8(%rsp),%rsp\n\t"
2659 /* jmp, but don't trust the assembler to choose the right jump */
2660 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2661 ".Lamd64_ge_fallthru:\n\t"
2662 "lea 0x8(%rsp),%rsp\n\t"
2671 struct emit_ops amd64_emit_ops
=
2673 amd64_emit_prologue
,
2674 amd64_emit_epilogue
,
2679 amd64_emit_rsh_signed
,
2680 amd64_emit_rsh_unsigned
,
2688 amd64_emit_less_signed
,
2689 amd64_emit_less_unsigned
,
2693 amd64_write_goto_address
,
2698 amd64_emit_stack_flush
,
2699 amd64_emit_zero_ext
,
2701 amd64_emit_stack_adjust
,
2702 amd64_emit_int_call_1
,
2703 amd64_emit_void_call_2
,
2712 #endif /* __x86_64__ */
2715 i386_emit_prologue (void)
2717 EMIT_ASM32 (i386_prologue
,
2721 /* At this point, the raw regs base address is at 8(%ebp), and the
2722 value pointer is at 12(%ebp). */
2726 i386_emit_epilogue (void)
2728 EMIT_ASM32 (i386_epilogue
,
2729 "mov 12(%ebp),%ecx\n\t"
2730 "mov %eax,(%ecx)\n\t"
2731 "mov %ebx,0x4(%ecx)\n\t"
2739 i386_emit_add (void)
2741 EMIT_ASM32 (i386_add
,
2742 "add (%esp),%eax\n\t"
2743 "adc 0x4(%esp),%ebx\n\t"
2744 "lea 0x8(%esp),%esp");
2748 i386_emit_sub (void)
2750 EMIT_ASM32 (i386_sub
,
2751 "subl %eax,(%esp)\n\t"
2752 "sbbl %ebx,4(%esp)\n\t"
2758 i386_emit_mul (void)
2764 i386_emit_lsh (void)
2770 i386_emit_rsh_signed (void)
2776 i386_emit_rsh_unsigned (void)
2782 i386_emit_ext (int arg
)
2787 EMIT_ASM32 (i386_ext_8
,
2790 "movl %eax,%ebx\n\t"
2794 EMIT_ASM32 (i386_ext_16
,
2796 "movl %eax,%ebx\n\t"
2800 EMIT_ASM32 (i386_ext_32
,
2801 "movl %eax,%ebx\n\t"
2810 i386_emit_log_not (void)
2812 EMIT_ASM32 (i386_log_not
,
2814 "test %eax,%eax\n\t"
2821 i386_emit_bit_and (void)
2823 EMIT_ASM32 (i386_and
,
2824 "and (%esp),%eax\n\t"
2825 "and 0x4(%esp),%ebx\n\t"
2826 "lea 0x8(%esp),%esp");
2830 i386_emit_bit_or (void)
2832 EMIT_ASM32 (i386_or
,
2833 "or (%esp),%eax\n\t"
2834 "or 0x4(%esp),%ebx\n\t"
2835 "lea 0x8(%esp),%esp");
2839 i386_emit_bit_xor (void)
2841 EMIT_ASM32 (i386_xor
,
2842 "xor (%esp),%eax\n\t"
2843 "xor 0x4(%esp),%ebx\n\t"
2844 "lea 0x8(%esp),%esp");
2848 i386_emit_bit_not (void)
2850 EMIT_ASM32 (i386_bit_not
,
2851 "xor $0xffffffff,%eax\n\t"
2852 "xor $0xffffffff,%ebx\n\t");
2856 i386_emit_equal (void)
2858 EMIT_ASM32 (i386_equal
,
2859 "cmpl %ebx,4(%esp)\n\t"
2860 "jne .Li386_equal_false\n\t"
2861 "cmpl %eax,(%esp)\n\t"
2862 "je .Li386_equal_true\n\t"
2863 ".Li386_equal_false:\n\t"
2865 "jmp .Li386_equal_end\n\t"
2866 ".Li386_equal_true:\n\t"
2868 ".Li386_equal_end:\n\t"
2870 "lea 0x8(%esp),%esp");
2874 i386_emit_less_signed (void)
2876 EMIT_ASM32 (i386_less_signed
,
2877 "cmpl %ebx,4(%esp)\n\t"
2878 "jl .Li386_less_signed_true\n\t"
2879 "jne .Li386_less_signed_false\n\t"
2880 "cmpl %eax,(%esp)\n\t"
2881 "jl .Li386_less_signed_true\n\t"
2882 ".Li386_less_signed_false:\n\t"
2884 "jmp .Li386_less_signed_end\n\t"
2885 ".Li386_less_signed_true:\n\t"
2887 ".Li386_less_signed_end:\n\t"
2889 "lea 0x8(%esp),%esp");
2893 i386_emit_less_unsigned (void)
2895 EMIT_ASM32 (i386_less_unsigned
,
2896 "cmpl %ebx,4(%esp)\n\t"
2897 "jb .Li386_less_unsigned_true\n\t"
2898 "jne .Li386_less_unsigned_false\n\t"
2899 "cmpl %eax,(%esp)\n\t"
2900 "jb .Li386_less_unsigned_true\n\t"
2901 ".Li386_less_unsigned_false:\n\t"
2903 "jmp .Li386_less_unsigned_end\n\t"
2904 ".Li386_less_unsigned_true:\n\t"
2906 ".Li386_less_unsigned_end:\n\t"
2908 "lea 0x8(%esp),%esp");
2912 i386_emit_ref (int size
)
2917 EMIT_ASM32 (i386_ref1
,
2921 EMIT_ASM32 (i386_ref2
,
2925 EMIT_ASM32 (i386_ref4
,
2926 "movl (%eax),%eax");
2929 EMIT_ASM32 (i386_ref8
,
2930 "movl 4(%eax),%ebx\n\t"
2931 "movl (%eax),%eax");
2937 i386_emit_if_goto (int *offset_p
, int *size_p
)
2939 EMIT_ASM32 (i386_if_goto
,
2945 /* Don't trust the assembler to choose the right jump */
2946 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2949 *offset_p
= 11; /* be sure that this matches the sequence above */
2955 i386_emit_goto (int *offset_p
, int *size_p
)
2957 EMIT_ASM32 (i386_goto
,
2958 /* Don't trust the assembler to choose the right jump */
2959 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2967 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2969 int diff
= (to
- (from
+ size
));
2970 unsigned char buf
[sizeof (int)];
2972 /* We're only doing 4-byte sizes at the moment. */
2979 memcpy (buf
, &diff
, sizeof (int));
2980 write_inferior_memory (from
, buf
, sizeof (int));
2984 i386_emit_const (LONGEST num
)
2986 unsigned char buf
[16];
2988 CORE_ADDR buildaddr
= current_insn_ptr
;
2991 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2992 lo
= num
& 0xffffffff;
2993 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2995 hi
= ((num
>> 32) & 0xffffffff);
2998 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2999 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3004 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3006 append_insns (&buildaddr
, i
, buf
);
3007 current_insn_ptr
= buildaddr
;
3011 i386_emit_call (CORE_ADDR fn
)
3013 unsigned char buf
[16];
3015 CORE_ADDR buildaddr
;
3017 buildaddr
= current_insn_ptr
;
3019 buf
[i
++] = 0xe8; /* call <reladdr> */
3020 offset
= ((int) fn
) - (buildaddr
+ 5);
3021 memcpy (buf
+ 1, &offset
, 4);
3022 append_insns (&buildaddr
, 5, buf
);
3023 current_insn_ptr
= buildaddr
;
3027 i386_emit_reg (int reg
)
3029 unsigned char buf
[16];
3031 CORE_ADDR buildaddr
;
3033 EMIT_ASM32 (i386_reg_a
,
3035 buildaddr
= current_insn_ptr
;
3037 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3038 memcpy (&buf
[i
], ®
, sizeof (reg
));
3040 append_insns (&buildaddr
, i
, buf
);
3041 current_insn_ptr
= buildaddr
;
3042 EMIT_ASM32 (i386_reg_b
,
3043 "mov %eax,4(%esp)\n\t"
3044 "mov 8(%ebp),%eax\n\t"
3046 i386_emit_call (get_raw_reg_func_addr ());
3047 EMIT_ASM32 (i386_reg_c
,
3049 "lea 0x8(%esp),%esp");
3053 i386_emit_pop (void)
3055 EMIT_ASM32 (i386_pop
,
3061 i386_emit_stack_flush (void)
3063 EMIT_ASM32 (i386_stack_flush
,
3069 i386_emit_zero_ext (int arg
)
3074 EMIT_ASM32 (i386_zero_ext_8
,
3075 "and $0xff,%eax\n\t"
3079 EMIT_ASM32 (i386_zero_ext_16
,
3080 "and $0xffff,%eax\n\t"
3084 EMIT_ASM32 (i386_zero_ext_32
,
3093 i386_emit_swap (void)
3095 EMIT_ASM32 (i386_swap
,
3105 i386_emit_stack_adjust (int n
)
3107 unsigned char buf
[16];
3109 CORE_ADDR buildaddr
= current_insn_ptr
;
3112 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3116 append_insns (&buildaddr
, i
, buf
);
3117 current_insn_ptr
= buildaddr
;
3120 /* FN's prototype is `LONGEST(*fn)(int)'. */
3123 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3125 unsigned char buf
[16];
3127 CORE_ADDR buildaddr
;
3129 EMIT_ASM32 (i386_int_call_1_a
,
3130 /* Reserve a bit of stack space. */
3132 /* Put the one argument on the stack. */
3133 buildaddr
= current_insn_ptr
;
3135 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3138 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3140 append_insns (&buildaddr
, i
, buf
);
3141 current_insn_ptr
= buildaddr
;
3142 i386_emit_call (fn
);
3143 EMIT_ASM32 (i386_int_call_1_c
,
3145 "lea 0x8(%esp),%esp");
/* NOTE(review): fragmentary extraction -- the "static void" header,
   braces, several asm strings (e.g. the %eax-preserving push implied
   by the comment at 3158), the `i' counter setup, the ModRM byte
   after opcode 0xc7, and the trailing restore instruction after the
   comment at 3180 are missing.  Restore from pristine sources.  */
3148 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
/* Emits code that calls FN with ARG1 and the current 64-bit stack top
   (%eax:%ebx): reserves 0x10 bytes of stack, copies %eax/%ebx into
   the second-argument slots, hand-assembles `movl $<arg1>,(%esp)' for
   the first argument, calls FN, then unwinds the 0x10 bytes and
   restores the original stack top.  */
3151 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3153 unsigned char buf
[16];
3155 CORE_ADDR buildaddr
;
3157 EMIT_ASM32 (i386_void_call_2_a
,
3158 /* Preserve %eax only; we don't have to worry about %ebx. */
3160 /* Reserve a bit of stack space for arguments. */
3161 "sub $0x10,%esp\n\t"
3162 /* Copy "top" to the second argument position. (Note that
3163 we can't assume function won't scribble on its
3164 arguments, so don't try to restore from this.) */
3165 "mov %eax,4(%esp)\n\t"
3166 "mov %ebx,8(%esp)");
3167 /* Put the first argument on the stack. */
3168 buildaddr
= current_insn_ptr
;
3170 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3173 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3175 append_insns (&buildaddr
, i
, buf
);
3176 current_insn_ptr
= buildaddr
;
3177 i386_emit_call (fn
);
3178 EMIT_ASM32 (i386_void_call_2_b
,
3179 "lea 0x10(%esp),%esp\n\t"
3180 /* Restore original stack top. */
/* NOTE(review): fragmentary extraction -- the header, braces, the
   EMIT_ASM32 wrapper, the pop of the comparison result, and the
   offset_p/size_p bookkeeping after the asm are missing (line numbers
   jump 3194 -> 3197 and stop at 3200).  Restore from pristine sources.  */
/* Emits a 64-bit equality compare-and-branch: compares both 32-bit
   halves of the two stack entries, drops 8 bytes either way, and on
   equality falls into a `jmp rel32' (raw 0xe9 + four zero bytes)
   whose displacement is patched later -- presumably *offset_p/*size_p
   report where that displacement lives; TODO confirm.  */
3186 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3189 /* Check low half first, more likely to be decider */
3190 "cmpl %eax,(%esp)\n\t"
3191 "jne .Leq_fallthru\n\t"
3192 "cmpl %ebx,4(%esp)\n\t"
3193 "jne .Leq_fallthru\n\t"
3194 "lea 0x8(%esp),%esp\n\t"
3197 /* jmp, but don't trust the assembler to choose the right jump */
3198 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3199 ".Leq_fallthru:\n\t"
3200 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): fragmentary extraction -- header, braces, the
   EMIT_ASM32 wrapper, one `je' after the first cmpl (line 3216
   missing), and the offset_p/size_p bookkeeping are absent.  */
/* Emits a 64-bit inequality compare-and-branch (mirror of the
   eq variant): branch taken when the halves differ, via a raw
   `jmp rel32' (0xe9) patched later.  */
3211 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3214 /* Check low half first, more likely to be decider */
3215 "cmpl %eax,(%esp)\n\t"
3217 "cmpl %ebx,4(%esp)\n\t"
3218 "je .Lne_fallthru\n\t"
3220 "lea 0x8(%esp),%esp\n\t"
3223 /* jmp, but don't trust the assembler to choose the right jump */
3224 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3225 ".Lne_fallthru:\n\t"
3226 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): fragmentary extraction -- header, braces, the
   EMIT_ASM32 wrapper, a conditional jump after the first cmpl
   (line 3241 missing), and offset_p/size_p bookkeeping are absent.  */
/* Emits a signed 64-bit less-than compare-and-branch: high halves
   first (`jnl' = jump-if-not-less on fallthrough), then low halves;
   branch via raw `jmp rel32' (0xe9) patched later.  */
3237 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3240 "cmpl %ebx,4(%esp)\n\t"
3242 "jne .Llt_fallthru\n\t"
3243 "cmpl %eax,(%esp)\n\t"
3244 "jnl .Llt_fallthru\n\t"
3246 "lea 0x8(%esp),%esp\n\t"
3249 /* jmp, but don't trust the assembler to choose the right jump */
3250 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3251 ".Llt_fallthru:\n\t"
3252 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): fragmentary extraction -- header, braces, the
   EMIT_ASM32 wrapper, a conditional jump after the first cmpl
   (line 3267 missing), and offset_p/size_p bookkeeping are absent.  */
/* Emits a signed 64-bit less-or-equal compare-and-branch, same shape
   as the lt variant but using `jnle' for the low-half test.  */
3263 i386_emit_le_goto (int *offset_p
, int *size_p
)
3266 "cmpl %ebx,4(%esp)\n\t"
3268 "jne .Lle_fallthru\n\t"
3269 "cmpl %eax,(%esp)\n\t"
3270 "jnle .Lle_fallthru\n\t"
3272 "lea 0x8(%esp),%esp\n\t"
3275 /* jmp, but don't trust the assembler to choose the right jump */
3276 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3277 ".Lle_fallthru:\n\t"
3278 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): fragmentary extraction -- header, braces, the
   EMIT_ASM32 wrapper, a conditional jump after the first cmpl
   (line 3293 missing), and offset_p/size_p bookkeeping are absent.  */
/* Emits a signed 64-bit greater-than compare-and-branch, same shape
   as the lt variant but using `jng' for the low-half test.  */
3289 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3292 "cmpl %ebx,4(%esp)\n\t"
3294 "jne .Lgt_fallthru\n\t"
3295 "cmpl %eax,(%esp)\n\t"
3296 "jng .Lgt_fallthru\n\t"
3298 "lea 0x8(%esp),%esp\n\t"
3301 /* jmp, but don't trust the assembler to choose the right jump */
3302 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3303 ".Lgt_fallthru:\n\t"
3304 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): fragmentary extraction -- header, braces, the
   EMIT_ASM32 wrapper, a conditional jump after the first cmpl
   (line 3319 missing), and offset_p/size_p bookkeeping are absent.  */
/* Emits a signed 64-bit greater-or-equal compare-and-branch, same
   shape as the lt variant but using `jnge' for the low-half test.  */
3315 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3318 "cmpl %ebx,4(%esp)\n\t"
3320 "jne .Lge_fallthru\n\t"
3321 "cmpl %eax,(%esp)\n\t"
3322 "jnge .Lge_fallthru\n\t"
3324 "lea 0x8(%esp),%esp\n\t"
3327 /* jmp, but don't trust the assembler to choose the right jump */
3328 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3329 ".Lge_fallthru:\n\t"
3330 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): fragmentary extraction -- most initializer entries are
   missing (upstream line numbers jump 3340 -> 3348, 3349 -> 3357,
   etc.) along with the braces and trailing entries.  The visible
   entries are positional, so order matters; restore the full table
   from pristine sources before building.  */
/* Table of i386 code emitters wired into the generic bytecode
   compiler via `struct emit_ops'.  */
3340 struct emit_ops i386_emit_ops
=
3348 i386_emit_rsh_signed
,
3349 i386_emit_rsh_unsigned
,
3357 i386_emit_less_signed
,
3358 i386_emit_less_unsigned
,
3362 i386_write_goto_address
,
3367 i386_emit_stack_flush
,
3370 i386_emit_stack_adjust
,
3371 i386_emit_int_call_1
,
3372 i386_emit_void_call_2
,
/* NOTE(review): fragmentary extraction -- the function name line
   (upstream 3383), braces, and any code between lines 3387 and 3390
   are missing.  Upstream presumably names this x86_emit_ops -- TODO
   confirm.  */
/* Selects the emit-ops table for the current inferior: the amd64
   table when the target description is 64-bit, else the i386 one.  */
3382 static struct emit_ops
*
3386 if (is_64bit_tdesc ())
3387 return &amd64_emit_ops
;
3390 return &i386_emit_ops
;
/* NOTE(review): only the name line of this function survived the
   extraction -- its return type and entire body (upstream lines
   3395+) are missing; restore from pristine sources.  Presumably
   reports whether the target supports range stepping -- confirm.  */
3394 x86_supports_range_stepping (void)
/* NOTE(review): fragmentary extraction -- many positional initializer
   entries are missing (upstream line numbers jump 3408 -> 3416,
   3423 -> 3426, etc.) along with the braces.  Order matters in this
   table; restore the full initializer from pristine sources.  */
3399 /* This is initialized assuming an amd64 target.
3400 x86_arch_setup will correct it for i386 or amd64 targets. */
/* Per-arch hook table consumed by the generic linux-low layer:
   register access, watchpoint/z-point support, fork/thread/resume
   callbacks, and fast-tracepoint plumbing.  */
3402 struct linux_target_ops the_low_target
=
3405 x86_linux_regs_info
,
3406 x86_cannot_fetch_register
,
3407 x86_cannot_store_register
,
3408 NULL
, /* fetch_register */
3416 x86_supports_z_point_type
,
3419 x86_stopped_by_watchpoint
,
3420 x86_stopped_data_address
,
3421 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3422 native i386 case (no registers smaller than an xfer unit), and are not
3423 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3426 /* need to fix up i386 siginfo if host is amd64 */
3428 x86_linux_new_process
,
3429 x86_linux_new_thread
,
3430 x86_linux_prepare_to_resume
,
3431 x86_linux_process_qsupported
,
3432 x86_supports_tracepoints
,
3433 x86_get_thread_area
,
3434 x86_install_fast_tracepoint_jump_pad
,
3436 x86_get_min_fast_tracepoint_insn_len
,
3437 x86_supports_range_stepping
,
3441 initialize_low_arch (void)
3443 /* Initialize the Linux target descriptions. */
3445 init_registers_amd64_linux ();
3446 init_registers_amd64_avx_linux ();
3447 init_registers_amd64_avx512_linux ();
3448 init_registers_amd64_mpx_linux ();
3450 init_registers_x32_linux ();
3451 init_registers_x32_avx_linux ();
3452 init_registers_x32_avx512_linux ();
3454 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3455 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3456 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3458 init_registers_i386_linux ();
3459 init_registers_i386_mmx_linux ();
3460 init_registers_i386_avx_linux ();
3461 init_registers_i386_avx512_linux ();
3462 init_registers_i386_mpx_linux ();
3464 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3465 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3466 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3468 initialize_regsets_info (&x86_regsets_info
);