1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif
/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;
/* Fallback target descriptions used when the connected GDB does not
   support XML target descriptions (see xmltarget_* strings below).  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

/* Jump instruction templates used when building jump pads: a 5-byte
   rel32 jump and a 4-byte rel16 ("small") jump.  The displacement
   bytes are patched in by the jump-pad builder.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

/* NOTE(review): the closing "</target>" of each string was missing from
   the damaged source; restored so the strings are well-formed.  */
static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include <sys/ptrace.h>

/* Fallback request numbers for kernels/headers that predate these
   ptrace requests; values match the Linux UAPI definitions.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
149 /* Per-process arch-specific data we want to keep. */
151 struct arch_process_info
153 struct x86_debug_reg_state debug_reg_state
;
158 /* Mapping between the general-purpose registers in `struct user'
159 format and GDB's register array layout.
160 Note that the transfer layout uses 64-bit regs. */
161 static /*const*/ int i386_regmap
[] =
163 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
164 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
165 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
166 DS
* 8, ES
* 8, FS
* 8, GS
* 8
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
171 /* So code below doesn't have to care, i386 or amd64. */
172 #define ORIG_EAX ORIG_RAX
175 static const int x86_64_regmap
[] =
177 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
178 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
179 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
180 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
181 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
182 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
187 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
190 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
191 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1
202 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
203 #define X86_64_USER_REGS (GS + 1)
205 #else /* ! __x86_64__ */
207 /* Mapping between the general-purpose registers in `struct user'
208 format and GDB's register array layout. */
209 static /*const*/ int i386_regmap
[] =
211 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
212 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
213 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
214 DS
* 4, ES
* 4, FS
* 4, GS
* 4
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225 /* Returns true if the current inferior belongs to a x86-64 process,
229 is_64bit_tdesc (void)
231 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
233 return register_size (regcache
->tdesc
, 0) == 8;
239 /* Called by libthread_db. */
242 ps_get_thread_area (const struct ps_prochandle
*ph
,
243 lwpid_t lwpid
, int idx
, void **base
)
246 int use_64bit
= is_64bit_tdesc ();
253 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
268 unsigned int desc
[4];
270 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
271 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
274 /* Ensure we properly extend the value to 64-bits for x86_64. */
275 *base
= (void *) (uintptr_t) desc
[1];
280 /* Get the thread area address. This is used to recognize which
281 thread is which when tracing with the in-process agent library. We
282 don't read anything from the address, and treat it as opaque; it's
283 the address itself that we assume is unique per-thread. */
286 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
289 int use_64bit
= is_64bit_tdesc ();
294 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
296 *addr
= (CORE_ADDR
) (uintptr_t) base
;
305 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
306 struct thread_info
*thr
= get_lwp_thread (lwp
);
307 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
308 unsigned int desc
[4];
310 const int reg_thread_area
= 3; /* bits to scale down register value. */
313 collect_register_by_name (regcache
, "gs", &gs
);
315 idx
= gs
>> reg_thread_area
;
317 if (ptrace (PTRACE_GET_THREAD_AREA
,
319 (void *) (long) idx
, (unsigned long) &desc
) < 0)
330 x86_cannot_store_register (int regno
)
333 if (is_64bit_tdesc ())
337 return regno
>= I386_NUM_REGS
;
341 x86_cannot_fetch_register (int regno
)
344 if (is_64bit_tdesc ())
348 return regno
>= I386_NUM_REGS
;
352 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
357 if (register_size (regcache
->tdesc
, 0) == 8)
359 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
360 if (x86_64_regmap
[i
] != -1)
361 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
365 /* 32-bit inferior registers need to be zero-extended.
366 Callers would read uninitialized memory otherwise. */
367 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
370 for (i
= 0; i
< I386_NUM_REGS
; i
++)
371 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
373 collect_register_by_name (regcache
, "orig_eax",
374 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
378 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
383 if (register_size (regcache
->tdesc
, 0) == 8)
385 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
386 if (x86_64_regmap
[i
] != -1)
387 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
392 for (i
= 0; i
< I386_NUM_REGS
; i
++)
393 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
395 supply_register_by_name (regcache
, "orig_eax",
396 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Fill the FP regset buffer BUF from REGCACHE.  amd64 always uses the
   fxsave layout; i386 falls back to the older fsave layout here.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

/* Store the FP regset buffer BUF into REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

/* i386 only: fill/store the FPX (fxsave) regset, used when the kernel
   supports PTRACE_GETFPXREGS.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

/* Fill/store the XSAVE extended-state regset (PTRACE_GETREGSET with
   NT_X86_XSTATE).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
447 /* ??? The non-biarch i386 case stores all the i387 regs twice.
448 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
449 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
450 doesn't work. IWBN to avoid the duplication in the case where it
451 does work. Maybe the arch_setup routine could check whether it works
452 and update the supported regsets accordingly. */
454 static struct regset_info x86_regsets
[] =
456 #ifdef HAVE_PTRACE_GETREGS
457 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
459 x86_fill_gregset
, x86_store_gregset
},
460 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
461 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
463 # ifdef HAVE_PTRACE_GETFPXREGS
464 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
466 x86_fill_fpxregset
, x86_store_fpxregset
},
469 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
471 x86_fill_fpregset
, x86_store_fpregset
},
472 #endif /* HAVE_PTRACE_GETREGS */
473 { 0, 0, 0, -1, -1, NULL
, NULL
}
477 x86_get_pc (struct regcache
*regcache
)
479 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
484 collect_register_by_name (regcache
, "rip", &pc
);
485 return (CORE_ADDR
) pc
;
490 collect_register_by_name (regcache
, "eip", &pc
);
491 return (CORE_ADDR
) pc
;
496 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
498 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
502 unsigned long newpc
= pc
;
503 supply_register_by_name (regcache
, "rip", &newpc
);
507 unsigned int newpc
= pc
;
508 supply_register_by_name (regcache
, "eip", &newpc
);
/* The single-byte int3 trap instruction, used as the software
   breakpoint on both i386 and amd64.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
516 x86_breakpoint_at (CORE_ADDR pc
)
520 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of REGNUM in the u_debugreg field of struct
   user, i.e. the PTRACE_PEEKUSER/POKEUSER offset of debug register
   REGNUM.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
539 /* Support for debug registers. */
541 /* Get debug register REGNUM value from the LWP specified by PTID. */
544 x86_linux_dr_get (ptid_t ptid
, int regnum
)
549 gdb_assert (ptid_lwp_p (ptid
));
550 tid
= ptid_get_lwp (ptid
);
553 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
555 perror_with_name (_("Couldn't read debug register"));
560 /* Set debug register REGNUM to VALUE in the LWP specified by PTID. */
563 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
567 gdb_assert (ptid_lwp_p (ptid
));
568 tid
= ptid_get_lwp (ptid
);
571 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
573 perror_with_name (_("Couldn't write debug register"));
576 /* Callback for iterate_over_lwps. Mark that our local mirror of
577 LWP's debug registers has been changed, and cause LWP to stop if
578 it isn't already. Values are written from our local mirror to
579 the actual debug registers immediately prior to LWP resuming. */
582 update_debug_registers_callback (struct lwp_info
*lwp
, void *arg
)
584 lwp_set_debug_registers_changed (lwp
, 1);
586 if (!lwp_is_stopped (lwp
))
587 linux_stop_lwp (lwp
);
589 /* Continue the iteration. */
593 /* Store ADDR in debug register REGNUM of all LWPs of the current
597 x86_linux_dr_set_addr (int regnum
, CORE_ADDR addr
)
599 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
601 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
603 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
606 /* Return the address stored in the current inferior's debug register
610 x86_linux_dr_get_addr (int regnum
)
612 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
614 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
617 /* Store CONTROL in the debug control registers of all LWPs of the
621 x86_linux_dr_set_control (unsigned long control
)
623 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
625 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
628 /* Return the value stored in the current inferior's debug control
632 x86_linux_dr_get_control (void)
634 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
637 /* Return the value stored in the current inferior's debug status
641 x86_linux_dr_get_status (void)
643 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
646 /* Low-level function vector. */
647 struct x86_dr_low_type x86_dr_low
=
649 x86_linux_dr_set_control
,
650 x86_linux_dr_set_addr
,
651 x86_linux_dr_get_addr
,
652 x86_linux_dr_get_status
,
653 x86_linux_dr_get_control
,
657 /* Breakpoint/Watchpoint support. */
660 x86_supports_z_point_type (char z_type
)
666 case Z_PACKET_WRITE_WP
:
667 case Z_PACKET_ACCESS_WP
:
675 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
676 int size
, struct raw_breakpoint
*bp
)
678 struct process_info
*proc
= current_process ();
682 case raw_bkpt_type_sw
:
683 return insert_memory_breakpoint (bp
);
685 case raw_bkpt_type_hw
:
686 case raw_bkpt_type_write_wp
:
687 case raw_bkpt_type_access_wp
:
689 enum target_hw_bp_type hw_type
690 = raw_bkpt_type_to_target_hw_bp_type (type
);
691 struct x86_debug_reg_state
*state
692 = &proc
->priv
->arch_private
->debug_reg_state
;
694 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
704 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
705 int size
, struct raw_breakpoint
*bp
)
707 struct process_info
*proc
= current_process ();
711 case raw_bkpt_type_sw
:
712 return remove_memory_breakpoint (bp
);
714 case raw_bkpt_type_hw
:
715 case raw_bkpt_type_write_wp
:
716 case raw_bkpt_type_access_wp
:
718 enum target_hw_bp_type hw_type
719 = raw_bkpt_type_to_target_hw_bp_type (type
);
720 struct x86_debug_reg_state
*state
721 = &proc
->priv
->arch_private
->debug_reg_state
;
723 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
732 x86_stopped_by_watchpoint (void)
734 struct process_info
*proc
= current_process ();
735 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
739 x86_stopped_data_address (void)
741 struct process_info
*proc
= current_process ();
743 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
749 /* Called when a new process is created. */
751 static struct arch_process_info
*
752 x86_linux_new_process (void)
754 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
756 x86_low_init_dregs (&info
->debug_reg_state
);
761 /* Called when a new thread is detected. */
764 x86_linux_new_thread (struct lwp_info
*lwp
)
766 lwp_set_debug_registers_changed (lwp
, 1);
769 /* See nat/x86-dregs.h. */
771 struct x86_debug_reg_state
*
772 x86_debug_reg_state (pid_t pid
)
774 struct process_info
*proc
= find_process_pid (pid
);
776 return &proc
->priv
->arch_private
->debug_reg_state
;
779 /* Called prior to resuming a thread. Updates the thread's debug
780 registers if the values in our local mirror have been changed. */
783 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
785 ptid_t ptid
= ptid_of_lwp (lwp
);
786 int clear_status
= 0;
788 if (lwp_debug_registers_changed (lwp
))
790 struct x86_debug_reg_state
*state
791 = x86_debug_reg_state (ptid_get_pid (ptid
));
794 /* Prior to Linux kernel 2.6.33 commit
795 72f674d203cd230426437cdcf7dd6f681dad8b0d, setting DR0-3 to
796 a value that did not match what was enabled in DR_CONTROL
797 resulted in EINVAL. To avoid this we zero DR_CONTROL before
798 writing address registers, only writing DR_CONTROL's actual
799 value once all the addresses are in place. */
800 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
802 ALL_DEBUG_ADDRESS_REGISTERS (i
)
803 if (state
->dr_ref_count
[i
] > 0)
805 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
807 /* If we're setting a watchpoint, any change the inferior
808 has made to its debug registers needs to be discarded
809 to avoid x86_stopped_data_address getting confused. */
813 /* If DR_CONTROL is supposed to be zero then it's already set. */
814 if (state
->dr_control_mirror
!= 0)
815 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
817 lwp_set_debug_registers_changed (lwp
, 0);
821 || lwp_stop_reason (lwp
) == TARGET_STOPPED_BY_WATCHPOINT
)
822 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
825 /* When GDBSERVER is built as a 64-bit application on linux, the
826 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
827 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
828 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
829 conversion in-place ourselves. */
831 /* These types below (compat_*) define a siginfo type that is layout
832 compatible with the siginfo type exported by the 32-bit userspace
837 typedef int compat_int_t
;
838 typedef unsigned int compat_uptr_t
;
840 typedef int compat_time_t
;
841 typedef int compat_timer_t
;
842 typedef int compat_clock_t
;
844 struct compat_timeval
846 compat_time_t tv_sec
;
850 typedef union compat_sigval
852 compat_int_t sival_int
;
853 compat_uptr_t sival_ptr
;
856 typedef struct compat_siginfo
864 int _pad
[((128 / sizeof (int)) - 3)];
873 /* POSIX.1b timers */
878 compat_sigval_t _sigval
;
881 /* POSIX.1b signals */
886 compat_sigval_t _sigval
;
895 compat_clock_t _utime
;
896 compat_clock_t _stime
;
899 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
914 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
915 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
917 typedef struct compat_x32_siginfo
925 int _pad
[((128 / sizeof (int)) - 3)];
934 /* POSIX.1b timers */
939 compat_sigval_t _sigval
;
942 /* POSIX.1b signals */
947 compat_sigval_t _sigval
;
956 compat_x32_clock_t _utime
;
957 compat_x32_clock_t _stime
;
960 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
973 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
975 #define cpt_si_pid _sifields._kill._pid
976 #define cpt_si_uid _sifields._kill._uid
977 #define cpt_si_timerid _sifields._timer._tid
978 #define cpt_si_overrun _sifields._timer._overrun
979 #define cpt_si_status _sifields._sigchld._status
980 #define cpt_si_utime _sifields._sigchld._utime
981 #define cpt_si_stime _sifields._sigchld._stime
982 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
983 #define cpt_si_addr _sifields._sigfault._addr
984 #define cpt_si_band _sifields._sigpoll._band
985 #define cpt_si_fd _sifields._sigpoll._fd
987 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
988 In their place is si_timer1,si_timer2. */
990 #define si_timerid si_timer1
993 #define si_overrun si_timer2
997 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
999 memset (to
, 0, sizeof (*to
));
1001 to
->si_signo
= from
->si_signo
;
1002 to
->si_errno
= from
->si_errno
;
1003 to
->si_code
= from
->si_code
;
1005 if (to
->si_code
== SI_TIMER
)
1007 to
->cpt_si_timerid
= from
->si_timerid
;
1008 to
->cpt_si_overrun
= from
->si_overrun
;
1009 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1011 else if (to
->si_code
== SI_USER
)
1013 to
->cpt_si_pid
= from
->si_pid
;
1014 to
->cpt_si_uid
= from
->si_uid
;
1016 else if (to
->si_code
< 0)
1018 to
->cpt_si_pid
= from
->si_pid
;
1019 to
->cpt_si_uid
= from
->si_uid
;
1020 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1024 switch (to
->si_signo
)
1027 to
->cpt_si_pid
= from
->si_pid
;
1028 to
->cpt_si_uid
= from
->si_uid
;
1029 to
->cpt_si_status
= from
->si_status
;
1030 to
->cpt_si_utime
= from
->si_utime
;
1031 to
->cpt_si_stime
= from
->si_stime
;
1037 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1040 to
->cpt_si_band
= from
->si_band
;
1041 to
->cpt_si_fd
= from
->si_fd
;
1044 to
->cpt_si_pid
= from
->si_pid
;
1045 to
->cpt_si_uid
= from
->si_uid
;
1046 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1053 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1055 memset (to
, 0, sizeof (*to
));
1057 to
->si_signo
= from
->si_signo
;
1058 to
->si_errno
= from
->si_errno
;
1059 to
->si_code
= from
->si_code
;
1061 if (to
->si_code
== SI_TIMER
)
1063 to
->si_timerid
= from
->cpt_si_timerid
;
1064 to
->si_overrun
= from
->cpt_si_overrun
;
1065 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1067 else if (to
->si_code
== SI_USER
)
1069 to
->si_pid
= from
->cpt_si_pid
;
1070 to
->si_uid
= from
->cpt_si_uid
;
1072 else if (to
->si_code
< 0)
1074 to
->si_pid
= from
->cpt_si_pid
;
1075 to
->si_uid
= from
->cpt_si_uid
;
1076 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1080 switch (to
->si_signo
)
1083 to
->si_pid
= from
->cpt_si_pid
;
1084 to
->si_uid
= from
->cpt_si_uid
;
1085 to
->si_status
= from
->cpt_si_status
;
1086 to
->si_utime
= from
->cpt_si_utime
;
1087 to
->si_stime
= from
->cpt_si_stime
;
1093 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1096 to
->si_band
= from
->cpt_si_band
;
1097 to
->si_fd
= from
->cpt_si_fd
;
1100 to
->si_pid
= from
->cpt_si_pid
;
1101 to
->si_uid
= from
->cpt_si_uid
;
1102 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1109 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1112 memset (to
, 0, sizeof (*to
));
1114 to
->si_signo
= from
->si_signo
;
1115 to
->si_errno
= from
->si_errno
;
1116 to
->si_code
= from
->si_code
;
1118 if (to
->si_code
== SI_TIMER
)
1120 to
->cpt_si_timerid
= from
->si_timerid
;
1121 to
->cpt_si_overrun
= from
->si_overrun
;
1122 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1124 else if (to
->si_code
== SI_USER
)
1126 to
->cpt_si_pid
= from
->si_pid
;
1127 to
->cpt_si_uid
= from
->si_uid
;
1129 else if (to
->si_code
< 0)
1131 to
->cpt_si_pid
= from
->si_pid
;
1132 to
->cpt_si_uid
= from
->si_uid
;
1133 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1137 switch (to
->si_signo
)
1140 to
->cpt_si_pid
= from
->si_pid
;
1141 to
->cpt_si_uid
= from
->si_uid
;
1142 to
->cpt_si_status
= from
->si_status
;
1143 to
->cpt_si_utime
= from
->si_utime
;
1144 to
->cpt_si_stime
= from
->si_stime
;
1150 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1153 to
->cpt_si_band
= from
->si_band
;
1154 to
->cpt_si_fd
= from
->si_fd
;
1157 to
->cpt_si_pid
= from
->si_pid
;
1158 to
->cpt_si_uid
= from
->si_uid
;
1159 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1166 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1167 compat_x32_siginfo_t
*from
)
1169 memset (to
, 0, sizeof (*to
));
1171 to
->si_signo
= from
->si_signo
;
1172 to
->si_errno
= from
->si_errno
;
1173 to
->si_code
= from
->si_code
;
1175 if (to
->si_code
== SI_TIMER
)
1177 to
->si_timerid
= from
->cpt_si_timerid
;
1178 to
->si_overrun
= from
->cpt_si_overrun
;
1179 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1181 else if (to
->si_code
== SI_USER
)
1183 to
->si_pid
= from
->cpt_si_pid
;
1184 to
->si_uid
= from
->cpt_si_uid
;
1186 else if (to
->si_code
< 0)
1188 to
->si_pid
= from
->cpt_si_pid
;
1189 to
->si_uid
= from
->cpt_si_uid
;
1190 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1194 switch (to
->si_signo
)
1197 to
->si_pid
= from
->cpt_si_pid
;
1198 to
->si_uid
= from
->cpt_si_uid
;
1199 to
->si_status
= from
->cpt_si_status
;
1200 to
->si_utime
= from
->cpt_si_utime
;
1201 to
->si_stime
= from
->cpt_si_stime
;
1207 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1210 to
->si_band
= from
->cpt_si_band
;
1211 to
->si_fd
= from
->cpt_si_fd
;
1214 to
->si_pid
= from
->cpt_si_pid
;
1215 to
->si_uid
= from
->cpt_si_uid
;
1216 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1222 #endif /* __x86_64__ */
1224 /* Convert a native/host siginfo object, into/from the siginfo in the
1225 layout of the inferiors' architecture. Returns true if any
1226 conversion was done; false otherwise. If DIRECTION is 1, then copy
1227 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1231 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1234 unsigned int machine
;
1235 int tid
= lwpid_of (current_thread
);
1236 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1238 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1239 if (!is_64bit_tdesc ())
1241 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1244 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1246 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1250 /* No fixup for native x32 GDB. */
1251 else if (!is_elf64
&& sizeof (void *) == 8)
1253 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1256 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1259 siginfo_from_compat_x32_siginfo (native
,
1260 (struct compat_x32_siginfo
*) inf
);
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  extended state regions (AVX, MPX, AVX512, ...)
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1	/* Unknown until first probed.  */
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 = not yet
   probed.  */
static int have_ptrace_getregset = -1;
1306 /* Get Linux/x86 target description from running target. */
1308 static const struct target_desc
*
1309 x86_linux_read_description (void)
1311 unsigned int machine
;
1315 static uint64_t xcr0
;
1316 struct regset_info
*regset
;
1318 tid
= lwpid_of (current_thread
);
1320 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1322 if (sizeof (void *) == 4)
1325 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1327 else if (machine
== EM_X86_64
)
1328 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1332 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1333 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1335 elf_fpxregset_t fpxregs
;
1337 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1339 have_ptrace_getfpxregs
= 0;
1340 have_ptrace_getregset
= 0;
1341 return tdesc_i386_mmx_linux
;
1344 have_ptrace_getfpxregs
= 1;
1350 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1352 /* Don't use XML. */
1354 if (machine
== EM_X86_64
)
1355 return tdesc_amd64_linux_no_xml
;
1358 return tdesc_i386_linux_no_xml
;
1361 if (have_ptrace_getregset
== -1)
1363 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1366 iov
.iov_base
= xstateregs
;
1367 iov
.iov_len
= sizeof (xstateregs
);
1369 /* Check if PTRACE_GETREGSET works. */
1370 if (ptrace (PTRACE_GETREGSET
, tid
,
1371 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1372 have_ptrace_getregset
= 0;
1375 have_ptrace_getregset
= 1;
1377 /* Get XCR0 from XSAVE extended state. */
1378 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1379 / sizeof (uint64_t))];
1381 /* Use PTRACE_GETREGSET if it is available. */
1382 for (regset
= x86_regsets
;
1383 regset
->fill_function
!= NULL
; regset
++)
1384 if (regset
->get_request
== PTRACE_GETREGSET
)
1385 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1386 else if (regset
->type
!= GENERAL_REGS
)
1391 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1392 xcr0_features
= (have_ptrace_getregset
1393 && (xcr0
& X86_XSTATE_ALL_MASK
));
1398 if (machine
== EM_X86_64
)
1405 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1407 case X86_XSTATE_AVX512_MASK
:
1408 return tdesc_amd64_avx512_linux
;
1410 case X86_XSTATE_MPX_MASK
:
1411 return tdesc_amd64_mpx_linux
;
1413 case X86_XSTATE_AVX_MASK
:
1414 return tdesc_amd64_avx_linux
;
1417 return tdesc_amd64_linux
;
1421 return tdesc_amd64_linux
;
1427 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1429 case X86_XSTATE_AVX512_MASK
:
1430 return tdesc_x32_avx512_linux
;
1432 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1433 case X86_XSTATE_AVX_MASK
:
1434 return tdesc_x32_avx_linux
;
1437 return tdesc_x32_linux
;
1441 return tdesc_x32_linux
;
1449 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1451 case (X86_XSTATE_AVX512_MASK
):
1452 return tdesc_i386_avx512_linux
;
1454 case (X86_XSTATE_MPX_MASK
):
1455 return tdesc_i386_mpx_linux
;
1457 case (X86_XSTATE_AVX_MASK
):
1458 return tdesc_i386_avx_linux
;
1461 return tdesc_i386_linux
;
1465 return tdesc_i386_linux
;
1468 gdb_assert_not_reached ("failed to return tdesc");
1471 /* Callback for find_inferior. Stops iteration when a thread with a
1472 given PID is found. */
1475 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1477 int pid
= *(int *) data
;
1479 return (ptid_get_pid (entry
->id
) == pid
);
1482 /* Callback for for_each_inferior. Calls the arch_setup routine for
1486 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1488 int pid
= ptid_get_pid (entry
->id
);
1490 /* Look up any thread of this processes. */
1492 = (struct thread_info
*) find_inferior (&all_threads
,
1493 same_process_callback
, &pid
);
1495 the_low_target
.arch_setup ();
1498 /* Update all the target description of all processes; a new GDB
1499 connected, and it may or not support xml target descriptions. */
1502 x86_linux_update_xmltarget (void)
1504 struct thread_info
*saved_thread
= current_thread
;
1506 /* Before changing the register cache's internal layout, flush the
1507 contents of the current valid caches back to the threads, and
1508 release the current regcache objects. */
1509 regcache_release ();
1511 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1513 current_thread
= saved_thread
;
1516 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1517 PTRACE_GETREGSET. */
1520 x86_linux_process_qsupported (const char *query
)
1522 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1523 with "i386" in qSupported query, it supports x86 XML target
1526 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1528 char *copy
= xstrdup (query
+ 13);
1531 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1533 if (strcmp (p
, "i386") == 0)
1543 x86_linux_update_xmltarget ();
1546 /* Common for x86/x86-64. */
1548 static struct regsets_info x86_regsets_info
=
1550 x86_regsets
, /* regsets */
1551 0, /* num_regsets */
1552 NULL
, /* disabled_regsets */
1556 static struct regs_info amd64_linux_regs_info
=
1558 NULL
, /* regset_bitmap */
1559 NULL
, /* usrregs_info */
1563 static struct usrregs_info i386_linux_usrregs_info
=
1569 static struct regs_info i386_linux_regs_info
=
1571 NULL
, /* regset_bitmap */
1572 &i386_linux_usrregs_info
,
1576 const struct regs_info
*
1577 x86_linux_regs_info (void)
1580 if (is_64bit_tdesc ())
1581 return &amd64_linux_regs_info
;
1584 return &i386_linux_regs_info
;
1587 /* Initialize the target description for the architecture of the
1591 x86_arch_setup (void)
1593 current_process ()->tdesc
= x86_linux_read_description ();
/* Tracepoints are supported on both x86 and x86-64.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1603 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1605 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), storing each byte into BUF in order.  Parsing
   stops at the first token that is not a hex number.  Returns the
   number of bytes written to BUF; the caller must ensure BUF is large
   enough.  OP is now const-qualified: every caller passes a string
   literal, which the old `char *' signature accepted unsafely.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul leaves ENDPTR == OP when no digits were consumed;
	 that marks the end of the opcode string.  */
      if (endptr == op)
	break;

      *buf++ = (unsigned char) ul;
      op = endptr;
    }

  return buf - buf_org;
}
1631 /* Build a jump pad that saves registers and calls a collection
1632 function. Writes a jump instruction to the jump pad to
1633 JJUMPAD_INSN. The caller is responsible to write it in at the
1634 tracepoint address. */
1637 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1638 CORE_ADDR collector
,
1641 CORE_ADDR
*jump_entry
,
1642 CORE_ADDR
*trampoline
,
1643 ULONGEST
*trampoline_size
,
1644 unsigned char *jjump_pad_insn
,
1645 ULONGEST
*jjump_pad_insn_size
,
1646 CORE_ADDR
*adjusted_insn_addr
,
1647 CORE_ADDR
*adjusted_insn_addr_end
,
1650 unsigned char buf
[40];
1654 CORE_ADDR buildaddr
= *jump_entry
;
1656 /* Build the jump pad. */
1658 /* First, do tracepoint data collection. Save registers. */
1660 /* Need to ensure stack pointer saved first. */
1661 buf
[i
++] = 0x54; /* push %rsp */
1662 buf
[i
++] = 0x55; /* push %rbp */
1663 buf
[i
++] = 0x57; /* push %rdi */
1664 buf
[i
++] = 0x56; /* push %rsi */
1665 buf
[i
++] = 0x52; /* push %rdx */
1666 buf
[i
++] = 0x51; /* push %rcx */
1667 buf
[i
++] = 0x53; /* push %rbx */
1668 buf
[i
++] = 0x50; /* push %rax */
1669 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1670 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1671 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1672 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1673 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1674 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1675 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1676 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1677 buf
[i
++] = 0x9c; /* pushfq */
1678 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1680 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1681 i
+= sizeof (unsigned long);
1682 buf
[i
++] = 0x57; /* push %rdi */
1683 append_insns (&buildaddr
, i
, buf
);
1685 /* Stack space for the collecting_t object. */
1687 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1688 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1689 memcpy (buf
+ i
, &tpoint
, 8);
1691 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1692 i
+= push_opcode (&buf
[i
],
1693 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1694 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1695 append_insns (&buildaddr
, i
, buf
);
1699 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1700 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1702 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1703 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1704 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1705 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1706 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1707 append_insns (&buildaddr
, i
, buf
);
1709 /* Set up the gdb_collect call. */
1710 /* At this point, (stack pointer + 0x18) is the base of our saved
1714 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1715 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1717 /* tpoint address may be 64-bit wide. */
1718 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1719 memcpy (buf
+ i
, &tpoint
, 8);
1721 append_insns (&buildaddr
, i
, buf
);
1723 /* The collector function being in the shared library, may be
1724 >31-bits away off the jump pad. */
1726 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1727 memcpy (buf
+ i
, &collector
, 8);
1729 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1730 append_insns (&buildaddr
, i
, buf
);
1732 /* Clear the spin-lock. */
1734 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1735 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1736 memcpy (buf
+ i
, &lockaddr
, 8);
1738 append_insns (&buildaddr
, i
, buf
);
1740 /* Remove stack that had been used for the collect_t object. */
1742 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1743 append_insns (&buildaddr
, i
, buf
);
1745 /* Restore register state. */
1747 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1751 buf
[i
++] = 0x9d; /* popfq */
1752 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1753 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1754 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1755 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1756 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1757 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1758 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1759 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1760 buf
[i
++] = 0x58; /* pop %rax */
1761 buf
[i
++] = 0x5b; /* pop %rbx */
1762 buf
[i
++] = 0x59; /* pop %rcx */
1763 buf
[i
++] = 0x5a; /* pop %rdx */
1764 buf
[i
++] = 0x5e; /* pop %rsi */
1765 buf
[i
++] = 0x5f; /* pop %rdi */
1766 buf
[i
++] = 0x5d; /* pop %rbp */
1767 buf
[i
++] = 0x5c; /* pop %rsp */
1768 append_insns (&buildaddr
, i
, buf
);
1770 /* Now, adjust the original instruction to execute in the jump
1772 *adjusted_insn_addr
= buildaddr
;
1773 relocate_instruction (&buildaddr
, tpaddr
);
1774 *adjusted_insn_addr_end
= buildaddr
;
1776 /* Finally, write a jump back to the program. */
1778 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1779 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1782 "E.Jump back from jump pad too far from tracepoint "
1783 "(offset 0x%" PRIx64
" > int32).", loffset
);
1787 offset
= (int) loffset
;
1788 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1789 memcpy (buf
+ 1, &offset
, 4);
1790 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1792 /* The jump pad is now built. Wire in a jump to our jump pad. This
1793 is always done last (by our caller actually), so that we can
1794 install fast tracepoints with threads running. This relies on
1795 the agent's atomic write support. */
1796 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1797 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1800 "E.Jump pad too far from tracepoint "
1801 "(offset 0x%" PRIx64
" > int32).", loffset
);
1805 offset
= (int) loffset
;
1807 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1808 memcpy (buf
+ 1, &offset
, 4);
1809 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1810 *jjump_pad_insn_size
= sizeof (jump_insn
);
1812 /* Return the end address of our pad. */
1813 *jump_entry
= buildaddr
;
1818 #endif /* __x86_64__ */
1820 /* Build a jump pad that saves registers and calls a collection
1821 function. Writes a jump instruction to the jump pad to
1822 JJUMPAD_INSN. The caller is responsible to write it in at the
1823 tracepoint address. */
1826 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1827 CORE_ADDR collector
,
1830 CORE_ADDR
*jump_entry
,
1831 CORE_ADDR
*trampoline
,
1832 ULONGEST
*trampoline_size
,
1833 unsigned char *jjump_pad_insn
,
1834 ULONGEST
*jjump_pad_insn_size
,
1835 CORE_ADDR
*adjusted_insn_addr
,
1836 CORE_ADDR
*adjusted_insn_addr_end
,
1839 unsigned char buf
[0x100];
1841 CORE_ADDR buildaddr
= *jump_entry
;
1843 /* Build the jump pad. */
1845 /* First, do tracepoint data collection. Save registers. */
1847 buf
[i
++] = 0x60; /* pushad */
1848 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1849 *((int *)(buf
+ i
)) = (int) tpaddr
;
1851 buf
[i
++] = 0x9c; /* pushf */
1852 buf
[i
++] = 0x1e; /* push %ds */
1853 buf
[i
++] = 0x06; /* push %es */
1854 buf
[i
++] = 0x0f; /* push %fs */
1856 buf
[i
++] = 0x0f; /* push %gs */
1858 buf
[i
++] = 0x16; /* push %ss */
1859 buf
[i
++] = 0x0e; /* push %cs */
1860 append_insns (&buildaddr
, i
, buf
);
1862 /* Stack space for the collecting_t object. */
1864 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1866 /* Build the object. */
1867 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1868 memcpy (buf
+ i
, &tpoint
, 4);
1870 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1872 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1873 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1874 append_insns (&buildaddr
, i
, buf
);
1876 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1877 If we cared for it, this could be using xchg alternatively. */
1880 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1881 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1883 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1885 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1886 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1887 append_insns (&buildaddr
, i
, buf
);
1890 /* Set up arguments to the gdb_collect call. */
1892 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1893 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1894 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1895 append_insns (&buildaddr
, i
, buf
);
1898 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1899 append_insns (&buildaddr
, i
, buf
);
1902 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1903 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1905 append_insns (&buildaddr
, i
, buf
);
1907 buf
[0] = 0xe8; /* call <reladdr> */
1908 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1909 memcpy (buf
+ 1, &offset
, 4);
1910 append_insns (&buildaddr
, 5, buf
);
1911 /* Clean up after the call. */
1912 buf
[0] = 0x83; /* add $0x8,%esp */
1915 append_insns (&buildaddr
, 3, buf
);
1918 /* Clear the spin-lock. This would need the LOCK prefix on older
1921 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1922 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1923 memcpy (buf
+ i
, &lockaddr
, 4);
1925 append_insns (&buildaddr
, i
, buf
);
1928 /* Remove stack that had been used for the collect_t object. */
1930 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1931 append_insns (&buildaddr
, i
, buf
);
1934 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1937 buf
[i
++] = 0x17; /* pop %ss */
1938 buf
[i
++] = 0x0f; /* pop %gs */
1940 buf
[i
++] = 0x0f; /* pop %fs */
1942 buf
[i
++] = 0x07; /* pop %es */
1943 buf
[i
++] = 0x1f; /* pop %ds */
1944 buf
[i
++] = 0x9d; /* popf */
1945 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1948 buf
[i
++] = 0x61; /* popad */
1949 append_insns (&buildaddr
, i
, buf
);
1951 /* Now, adjust the original instruction to execute in the jump
1953 *adjusted_insn_addr
= buildaddr
;
1954 relocate_instruction (&buildaddr
, tpaddr
);
1955 *adjusted_insn_addr_end
= buildaddr
;
1957 /* Write the jump back to the program. */
1958 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1959 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1960 memcpy (buf
+ 1, &offset
, 4);
1961 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1963 /* The jump pad is now built. Wire in a jump to our jump pad. This
1964 is always done last (by our caller actually), so that we can
1965 install fast tracepoints with threads running. This relies on
1966 the agent's atomic write support. */
1969 /* Create a trampoline. */
1970 *trampoline_size
= sizeof (jump_insn
);
1971 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1973 /* No trampoline space available. */
1975 "E.Cannot allocate trampoline space needed for fast "
1976 "tracepoints on 4-byte instructions.");
1980 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1981 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1982 memcpy (buf
+ 1, &offset
, 4);
1983 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1985 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1986 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1987 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1988 memcpy (buf
+ 2, &offset
, 2);
1989 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1990 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1994 /* Else use a 32-bit relative jump instruction. */
1995 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1996 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1997 memcpy (buf
+ 1, &offset
, 4);
1998 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1999 *jjump_pad_insn_size
= sizeof (jump_insn
);
2002 /* Return the end address of our pad. */
2003 *jump_entry
= buildaddr
;
2009 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2010 CORE_ADDR collector
,
2013 CORE_ADDR
*jump_entry
,
2014 CORE_ADDR
*trampoline
,
2015 ULONGEST
*trampoline_size
,
2016 unsigned char *jjump_pad_insn
,
2017 ULONGEST
*jjump_pad_insn_size
,
2018 CORE_ADDR
*adjusted_insn_addr
,
2019 CORE_ADDR
*adjusted_insn_addr_end
,
2023 if (is_64bit_tdesc ())
2024 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2025 collector
, lockaddr
,
2026 orig_size
, jump_entry
,
2027 trampoline
, trampoline_size
,
2029 jjump_pad_insn_size
,
2031 adjusted_insn_addr_end
,
2035 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2036 collector
, lockaddr
,
2037 orig_size
, jump_entry
,
2038 trampoline
, trampoline_size
,
2040 jjump_pad_insn_size
,
2042 adjusted_insn_addr_end
,
2046 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2050 x86_get_min_fast_tracepoint_insn_len (void)
2052 static int warned_about_fast_tracepoints
= 0;
2055 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2056 used for fast tracepoints. */
2057 if (is_64bit_tdesc ())
2061 if (agent_loaded_p ())
2063 char errbuf
[IPA_BUFSIZ
];
2067 /* On x86, if trampolines are available, then 4-byte jump instructions
2068 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2069 with a 4-byte offset are used instead. */
2070 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2074 /* GDB has no channel to explain to user why a shorter fast
2075 tracepoint is not possible, but at least make GDBserver
2076 mention that something has gone awry. */
2077 if (!warned_about_fast_tracepoints
)
2079 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2080 warned_about_fast_tracepoints
= 1;
2087 /* Indicate that the minimum length is currently unknown since the IPA
2088 has not loaded yet. */
2094 add_insns (unsigned char *start
, int len
)
2096 CORE_ADDR buildaddr
= current_insn_ptr
;
2099 debug_printf ("Adding %d bytes of insn at %s\n",
2100 len
, paddress (buildaddr
));
2102 append_insns (&buildaddr
, len
, start
);
2103 current_insn_ptr
= buildaddr
;
2106 /* Our general strategy for emitting code is to avoid specifying raw
2107 bytes whenever possible, and instead copy a block of inline asm
2108 that is embedded in the function. This is a little messy, because
2109 we need to keep the compiler from discarding what looks like dead
2110 code, plus suppress various warnings. */
2112 #define EMIT_ASM(NAME, INSNS) \
2115 extern unsigned char start_ ## NAME, end_ ## NAME; \
2116 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2117 __asm__ ("jmp end_" #NAME "\n" \
2118 "\t" "start_" #NAME ":" \
2120 "\t" "end_" #NAME ":"); \
2125 #define EMIT_ASM32(NAME,INSNS) \
2128 extern unsigned char start_ ## NAME, end_ ## NAME; \
2129 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2130 __asm__ (".code32\n" \
2131 "\t" "jmp end_" #NAME "\n" \
2132 "\t" "start_" #NAME ":\n" \
2134 "\t" "end_" #NAME ":\n" \
2140 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2147 amd64_emit_prologue (void)
2149 EMIT_ASM (amd64_prologue
,
2151 "movq %rsp,%rbp\n\t"
2152 "sub $0x20,%rsp\n\t"
2153 "movq %rdi,-8(%rbp)\n\t"
2154 "movq %rsi,-16(%rbp)");
2159 amd64_emit_epilogue (void)
2161 EMIT_ASM (amd64_epilogue
,
2162 "movq -16(%rbp),%rdi\n\t"
2163 "movq %rax,(%rdi)\n\t"
2170 amd64_emit_add (void)
2172 EMIT_ASM (amd64_add
,
2173 "add (%rsp),%rax\n\t"
2174 "lea 0x8(%rsp),%rsp");
2178 amd64_emit_sub (void)
2180 EMIT_ASM (amd64_sub
,
2181 "sub %rax,(%rsp)\n\t"
2186 amd64_emit_mul (void)
2192 amd64_emit_lsh (void)
2198 amd64_emit_rsh_signed (void)
2204 amd64_emit_rsh_unsigned (void)
2210 amd64_emit_ext (int arg
)
2215 EMIT_ASM (amd64_ext_8
,
2221 EMIT_ASM (amd64_ext_16
,
2226 EMIT_ASM (amd64_ext_32
,
2235 amd64_emit_log_not (void)
2237 EMIT_ASM (amd64_log_not
,
2238 "test %rax,%rax\n\t"
2244 amd64_emit_bit_and (void)
2246 EMIT_ASM (amd64_and
,
2247 "and (%rsp),%rax\n\t"
2248 "lea 0x8(%rsp),%rsp");
2252 amd64_emit_bit_or (void)
2255 "or (%rsp),%rax\n\t"
2256 "lea 0x8(%rsp),%rsp");
2260 amd64_emit_bit_xor (void)
2262 EMIT_ASM (amd64_xor
,
2263 "xor (%rsp),%rax\n\t"
2264 "lea 0x8(%rsp),%rsp");
2268 amd64_emit_bit_not (void)
2270 EMIT_ASM (amd64_bit_not
,
2271 "xorq $0xffffffffffffffff,%rax");
2275 amd64_emit_equal (void)
2277 EMIT_ASM (amd64_equal
,
2278 "cmp %rax,(%rsp)\n\t"
2279 "je .Lamd64_equal_true\n\t"
2281 "jmp .Lamd64_equal_end\n\t"
2282 ".Lamd64_equal_true:\n\t"
2284 ".Lamd64_equal_end:\n\t"
2285 "lea 0x8(%rsp),%rsp");
2289 amd64_emit_less_signed (void)
2291 EMIT_ASM (amd64_less_signed
,
2292 "cmp %rax,(%rsp)\n\t"
2293 "jl .Lamd64_less_signed_true\n\t"
2295 "jmp .Lamd64_less_signed_end\n\t"
2296 ".Lamd64_less_signed_true:\n\t"
2298 ".Lamd64_less_signed_end:\n\t"
2299 "lea 0x8(%rsp),%rsp");
2303 amd64_emit_less_unsigned (void)
2305 EMIT_ASM (amd64_less_unsigned
,
2306 "cmp %rax,(%rsp)\n\t"
2307 "jb .Lamd64_less_unsigned_true\n\t"
2309 "jmp .Lamd64_less_unsigned_end\n\t"
2310 ".Lamd64_less_unsigned_true:\n\t"
2312 ".Lamd64_less_unsigned_end:\n\t"
2313 "lea 0x8(%rsp),%rsp");
2317 amd64_emit_ref (int size
)
2322 EMIT_ASM (amd64_ref1
,
2326 EMIT_ASM (amd64_ref2
,
2330 EMIT_ASM (amd64_ref4
,
2331 "movl (%rax),%eax");
2334 EMIT_ASM (amd64_ref8
,
2335 "movq (%rax),%rax");
2341 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2343 EMIT_ASM (amd64_if_goto
,
2347 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2355 amd64_emit_goto (int *offset_p
, int *size_p
)
2357 EMIT_ASM (amd64_goto
,
2358 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2366 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2368 int diff
= (to
- (from
+ size
));
2369 unsigned char buf
[sizeof (int)];
2377 memcpy (buf
, &diff
, sizeof (int));
2378 write_inferior_memory (from
, buf
, sizeof (int));
2382 amd64_emit_const (LONGEST num
)
2384 unsigned char buf
[16];
2386 CORE_ADDR buildaddr
= current_insn_ptr
;
2389 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2390 memcpy (&buf
[i
], &num
, sizeof (num
));
2392 append_insns (&buildaddr
, i
, buf
);
2393 current_insn_ptr
= buildaddr
;
2397 amd64_emit_call (CORE_ADDR fn
)
2399 unsigned char buf
[16];
2401 CORE_ADDR buildaddr
;
2404 /* The destination function being in the shared library, may be
2405 >31-bits away off the compiled code pad. */
2407 buildaddr
= current_insn_ptr
;
2409 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2413 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2415 /* Offset is too large for a call. Use callq, but that requires
2416 a register, so avoid it if possible. Use r10, since it is
2417 call-clobbered, we don't have to push/pop it. */
2418 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2420 memcpy (buf
+ i
, &fn
, 8);
2422 buf
[i
++] = 0xff; /* callq *%r10 */
2427 int offset32
= offset64
; /* we know we can't overflow here. */
2428 memcpy (buf
+ i
, &offset32
, 4);
2432 append_insns (&buildaddr
, i
, buf
);
2433 current_insn_ptr
= buildaddr
;
2437 amd64_emit_reg (int reg
)
2439 unsigned char buf
[16];
2441 CORE_ADDR buildaddr
;
2443 /* Assume raw_regs is still in %rdi. */
2444 buildaddr
= current_insn_ptr
;
2446 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2447 memcpy (&buf
[i
], ®
, sizeof (reg
));
2449 append_insns (&buildaddr
, i
, buf
);
2450 current_insn_ptr
= buildaddr
;
2451 amd64_emit_call (get_raw_reg_func_addr ());
2455 amd64_emit_pop (void)
2457 EMIT_ASM (amd64_pop
,
2462 amd64_emit_stack_flush (void)
2464 EMIT_ASM (amd64_stack_flush
,
2469 amd64_emit_zero_ext (int arg
)
2474 EMIT_ASM (amd64_zero_ext_8
,
2478 EMIT_ASM (amd64_zero_ext_16
,
2479 "and $0xffff,%rax");
2482 EMIT_ASM (amd64_zero_ext_32
,
2483 "mov $0xffffffff,%rcx\n\t"
2492 amd64_emit_swap (void)
2494 EMIT_ASM (amd64_swap
,
2501 amd64_emit_stack_adjust (int n
)
2503 unsigned char buf
[16];
2505 CORE_ADDR buildaddr
= current_insn_ptr
;
2508 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2512 /* This only handles adjustments up to 16, but we don't expect any more. */
2514 append_insns (&buildaddr
, i
, buf
);
2515 current_insn_ptr
= buildaddr
;
2518 /* FN's prototype is `LONGEST(*fn)(int)'. */
2521 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2523 unsigned char buf
[16];
2525 CORE_ADDR buildaddr
;
2527 buildaddr
= current_insn_ptr
;
2529 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2530 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2532 append_insns (&buildaddr
, i
, buf
);
2533 current_insn_ptr
= buildaddr
;
2534 amd64_emit_call (fn
);
2537 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2540 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2542 unsigned char buf
[16];
2544 CORE_ADDR buildaddr
;
2546 buildaddr
= current_insn_ptr
;
2548 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2549 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2551 append_insns (&buildaddr
, i
, buf
);
2552 current_insn_ptr
= buildaddr
;
2553 EMIT_ASM (amd64_void_call_2_a
,
2554 /* Save away a copy of the stack top. */
2556 /* Also pass top as the second argument. */
2558 amd64_emit_call (fn
);
2559 EMIT_ASM (amd64_void_call_2_b
,
2560 /* Restore the stack top, %rax may have been trashed. */
2565 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2568 "cmp %rax,(%rsp)\n\t"
2569 "jne .Lamd64_eq_fallthru\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2572 /* jmp, but don't trust the assembler to choose the right jump */
2573 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2574 ".Lamd64_eq_fallthru:\n\t"
2575 "lea 0x8(%rsp),%rsp\n\t"
2585 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2588 "cmp %rax,(%rsp)\n\t"
2589 "je .Lamd64_ne_fallthru\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2592 /* jmp, but don't trust the assembler to choose the right jump */
2593 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2594 ".Lamd64_ne_fallthru:\n\t"
2595 "lea 0x8(%rsp),%rsp\n\t"
2605 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2608 "cmp %rax,(%rsp)\n\t"
2609 "jnl .Lamd64_lt_fallthru\n\t"
2610 "lea 0x8(%rsp),%rsp\n\t"
2612 /* jmp, but don't trust the assembler to choose the right jump */
2613 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2614 ".Lamd64_lt_fallthru:\n\t"
2615 "lea 0x8(%rsp),%rsp\n\t"
2625 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2628 "cmp %rax,(%rsp)\n\t"
2629 "jnle .Lamd64_le_fallthru\n\t"
2630 "lea 0x8(%rsp),%rsp\n\t"
2632 /* jmp, but don't trust the assembler to choose the right jump */
2633 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2634 ".Lamd64_le_fallthru:\n\t"
2635 "lea 0x8(%rsp),%rsp\n\t"
2645 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2648 "cmp %rax,(%rsp)\n\t"
2649 "jng .Lamd64_gt_fallthru\n\t"
2650 "lea 0x8(%rsp),%rsp\n\t"
2652 /* jmp, but don't trust the assembler to choose the right jump */
2653 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2654 ".Lamd64_gt_fallthru:\n\t"
2655 "lea 0x8(%rsp),%rsp\n\t"
2665 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2668 "cmp %rax,(%rsp)\n\t"
2669 "jnge .Lamd64_ge_fallthru\n\t"
2670 ".Lamd64_ge_jump:\n\t"
2671 "lea 0x8(%rsp),%rsp\n\t"
2673 /* jmp, but don't trust the assembler to choose the right jump */
2674 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2675 ".Lamd64_ge_fallthru:\n\t"
2676 "lea 0x8(%rsp),%rsp\n\t"
2685 struct emit_ops amd64_emit_ops
=
2687 amd64_emit_prologue
,
2688 amd64_emit_epilogue
,
2693 amd64_emit_rsh_signed
,
2694 amd64_emit_rsh_unsigned
,
2702 amd64_emit_less_signed
,
2703 amd64_emit_less_unsigned
,
2707 amd64_write_goto_address
,
2712 amd64_emit_stack_flush
,
2713 amd64_emit_zero_ext
,
2715 amd64_emit_stack_adjust
,
2716 amd64_emit_int_call_1
,
2717 amd64_emit_void_call_2
,
2726 #endif /* __x86_64__ */
2729 i386_emit_prologue (void)
2731 EMIT_ASM32 (i386_prologue
,
2735 /* At this point, the raw regs base address is at 8(%ebp), and the
2736 value pointer is at 12(%ebp). */
2740 i386_emit_epilogue (void)
2742 EMIT_ASM32 (i386_epilogue
,
2743 "mov 12(%ebp),%ecx\n\t"
2744 "mov %eax,(%ecx)\n\t"
2745 "mov %ebx,0x4(%ecx)\n\t"
2753 i386_emit_add (void)
2755 EMIT_ASM32 (i386_add
,
2756 "add (%esp),%eax\n\t"
2757 "adc 0x4(%esp),%ebx\n\t"
2758 "lea 0x8(%esp),%esp");
2762 i386_emit_sub (void)
2764 EMIT_ASM32 (i386_sub
,
2765 "subl %eax,(%esp)\n\t"
2766 "sbbl %ebx,4(%esp)\n\t"
2772 i386_emit_mul (void)
2778 i386_emit_lsh (void)
2784 i386_emit_rsh_signed (void)
2790 i386_emit_rsh_unsigned (void)
2796 i386_emit_ext (int arg
)
2801 EMIT_ASM32 (i386_ext_8
,
2804 "movl %eax,%ebx\n\t"
2808 EMIT_ASM32 (i386_ext_16
,
2810 "movl %eax,%ebx\n\t"
2814 EMIT_ASM32 (i386_ext_32
,
2815 "movl %eax,%ebx\n\t"
2824 i386_emit_log_not (void)
2826 EMIT_ASM32 (i386_log_not
,
2828 "test %eax,%eax\n\t"
2835 i386_emit_bit_and (void)
2837 EMIT_ASM32 (i386_and
,
2838 "and (%esp),%eax\n\t"
2839 "and 0x4(%esp),%ebx\n\t"
2840 "lea 0x8(%esp),%esp");
2844 i386_emit_bit_or (void)
2846 EMIT_ASM32 (i386_or
,
2847 "or (%esp),%eax\n\t"
2848 "or 0x4(%esp),%ebx\n\t"
2849 "lea 0x8(%esp),%esp");
2853 i386_emit_bit_xor (void)
2855 EMIT_ASM32 (i386_xor
,
2856 "xor (%esp),%eax\n\t"
2857 "xor 0x4(%esp),%ebx\n\t"
2858 "lea 0x8(%esp),%esp");
2862 i386_emit_bit_not (void)
2864 EMIT_ASM32 (i386_bit_not
,
2865 "xor $0xffffffff,%eax\n\t"
2866 "xor $0xffffffff,%ebx\n\t");
2870 i386_emit_equal (void)
2872 EMIT_ASM32 (i386_equal
,
2873 "cmpl %ebx,4(%esp)\n\t"
2874 "jne .Li386_equal_false\n\t"
2875 "cmpl %eax,(%esp)\n\t"
2876 "je .Li386_equal_true\n\t"
2877 ".Li386_equal_false:\n\t"
2879 "jmp .Li386_equal_end\n\t"
2880 ".Li386_equal_true:\n\t"
2882 ".Li386_equal_end:\n\t"
2884 "lea 0x8(%esp),%esp");
2888 i386_emit_less_signed (void)
2890 EMIT_ASM32 (i386_less_signed
,
2891 "cmpl %ebx,4(%esp)\n\t"
2892 "jl .Li386_less_signed_true\n\t"
2893 "jne .Li386_less_signed_false\n\t"
2894 "cmpl %eax,(%esp)\n\t"
2895 "jl .Li386_less_signed_true\n\t"
2896 ".Li386_less_signed_false:\n\t"
2898 "jmp .Li386_less_signed_end\n\t"
2899 ".Li386_less_signed_true:\n\t"
2901 ".Li386_less_signed_end:\n\t"
2903 "lea 0x8(%esp),%esp");
2907 i386_emit_less_unsigned (void)
2909 EMIT_ASM32 (i386_less_unsigned
,
2910 "cmpl %ebx,4(%esp)\n\t"
2911 "jb .Li386_less_unsigned_true\n\t"
2912 "jne .Li386_less_unsigned_false\n\t"
2913 "cmpl %eax,(%esp)\n\t"
2914 "jb .Li386_less_unsigned_true\n\t"
2915 ".Li386_less_unsigned_false:\n\t"
2917 "jmp .Li386_less_unsigned_end\n\t"
2918 ".Li386_less_unsigned_true:\n\t"
2920 ".Li386_less_unsigned_end:\n\t"
2922 "lea 0x8(%esp),%esp");
2926 i386_emit_ref (int size
)
2931 EMIT_ASM32 (i386_ref1
,
2935 EMIT_ASM32 (i386_ref2
,
2939 EMIT_ASM32 (i386_ref4
,
2940 "movl (%eax),%eax");
2943 EMIT_ASM32 (i386_ref8
,
2944 "movl 4(%eax),%ebx\n\t"
2945 "movl (%eax),%eax");
2951 i386_emit_if_goto (int *offset_p
, int *size_p
)
2953 EMIT_ASM32 (i386_if_goto
,
2959 /* Don't trust the assembler to choose the right jump */
2960 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2963 *offset_p
= 11; /* be sure that this matches the sequence above */
2969 i386_emit_goto (int *offset_p
, int *size_p
)
2971 EMIT_ASM32 (i386_goto
,
2972 /* Don't trust the assembler to choose the right jump */
2973 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2981 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2983 int diff
= (to
- (from
+ size
));
2984 unsigned char buf
[sizeof (int)];
2986 /* We're only doing 4-byte sizes at the moment. */
2993 memcpy (buf
, &diff
, sizeof (int));
2994 write_inferior_memory (from
, buf
, sizeof (int));
2998 i386_emit_const (LONGEST num
)
3000 unsigned char buf
[16];
3002 CORE_ADDR buildaddr
= current_insn_ptr
;
3005 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3006 lo
= num
& 0xffffffff;
3007 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3009 hi
= ((num
>> 32) & 0xffffffff);
3012 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3013 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3018 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3020 append_insns (&buildaddr
, i
, buf
);
3021 current_insn_ptr
= buildaddr
;
3025 i386_emit_call (CORE_ADDR fn
)
3027 unsigned char buf
[16];
3029 CORE_ADDR buildaddr
;
3031 buildaddr
= current_insn_ptr
;
3033 buf
[i
++] = 0xe8; /* call <reladdr> */
3034 offset
= ((int) fn
) - (buildaddr
+ 5);
3035 memcpy (buf
+ 1, &offset
, 4);
3036 append_insns (&buildaddr
, 5, buf
);
3037 current_insn_ptr
= buildaddr
;
3041 i386_emit_reg (int reg
)
3043 unsigned char buf
[16];
3045 CORE_ADDR buildaddr
;
3047 EMIT_ASM32 (i386_reg_a
,
3049 buildaddr
= current_insn_ptr
;
3051 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3052 memcpy (&buf
[i
], ®
, sizeof (reg
));
3054 append_insns (&buildaddr
, i
, buf
);
3055 current_insn_ptr
= buildaddr
;
3056 EMIT_ASM32 (i386_reg_b
,
3057 "mov %eax,4(%esp)\n\t"
3058 "mov 8(%ebp),%eax\n\t"
3060 i386_emit_call (get_raw_reg_func_addr ());
3061 EMIT_ASM32 (i386_reg_c
,
3063 "lea 0x8(%esp),%esp");
3067 i386_emit_pop (void)
3069 EMIT_ASM32 (i386_pop
,
3075 i386_emit_stack_flush (void)
3077 EMIT_ASM32 (i386_stack_flush
,
3083 i386_emit_zero_ext (int arg
)
3088 EMIT_ASM32 (i386_zero_ext_8
,
3089 "and $0xff,%eax\n\t"
3093 EMIT_ASM32 (i386_zero_ext_16
,
3094 "and $0xffff,%eax\n\t"
3098 EMIT_ASM32 (i386_zero_ext_32
,
3107 i386_emit_swap (void)
3109 EMIT_ASM32 (i386_swap
,
3119 i386_emit_stack_adjust (int n
)
3121 unsigned char buf
[16];
3123 CORE_ADDR buildaddr
= current_insn_ptr
;
3126 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3130 append_insns (&buildaddr
, i
, buf
);
3131 current_insn_ptr
= buildaddr
;
3134 /* FN's prototype is `LONGEST(*fn)(int)'. */
3137 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3139 unsigned char buf
[16];
3141 CORE_ADDR buildaddr
;
3143 EMIT_ASM32 (i386_int_call_1_a
,
3144 /* Reserve a bit of stack space. */
3146 /* Put the one argument on the stack. */
3147 buildaddr
= current_insn_ptr
;
3149 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3152 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3154 append_insns (&buildaddr
, i
, buf
);
3155 current_insn_ptr
= buildaddr
;
3156 i386_emit_call (fn
);
3157 EMIT_ASM32 (i386_int_call_1_c
,
3159 "lea 0x8(%esp),%esp");
3162 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3165 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3167 unsigned char buf
[16];
3169 CORE_ADDR buildaddr
;
3171 EMIT_ASM32 (i386_void_call_2_a
,
3172 /* Preserve %eax only; we don't have to worry about %ebx. */
3174 /* Reserve a bit of stack space for arguments. */
3175 "sub $0x10,%esp\n\t"
3176 /* Copy "top" to the second argument position. (Note that
3177 we can't assume function won't scribble on its
3178 arguments, so don't try to restore from this.) */
3179 "mov %eax,4(%esp)\n\t"
3180 "mov %ebx,8(%esp)");
3181 /* Put the first argument on the stack. */
3182 buildaddr
= current_insn_ptr
;
3184 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3187 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3189 append_insns (&buildaddr
, i
, buf
);
3190 current_insn_ptr
= buildaddr
;
3191 i386_emit_call (fn
);
3192 EMIT_ASM32 (i386_void_call_2_b
,
3193 "lea 0x10(%esp),%esp\n\t"
3194 /* Restore original stack top. */
3200 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3203 /* Check low half first, more likely to be decider */
3204 "cmpl %eax,(%esp)\n\t"
3205 "jne .Leq_fallthru\n\t"
3206 "cmpl %ebx,4(%esp)\n\t"
3207 "jne .Leq_fallthru\n\t"
3208 "lea 0x8(%esp),%esp\n\t"
3211 /* jmp, but don't trust the assembler to choose the right jump */
3212 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3213 ".Leq_fallthru:\n\t"
3214 "lea 0x8(%esp),%esp\n\t"
3225 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3228 /* Check low half first, more likely to be decider */
3229 "cmpl %eax,(%esp)\n\t"
3231 "cmpl %ebx,4(%esp)\n\t"
3232 "je .Lne_fallthru\n\t"
3234 "lea 0x8(%esp),%esp\n\t"
3237 /* jmp, but don't trust the assembler to choose the right jump */
3238 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3239 ".Lne_fallthru:\n\t"
3240 "lea 0x8(%esp),%esp\n\t"
3251 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3254 "cmpl %ebx,4(%esp)\n\t"
3256 "jne .Llt_fallthru\n\t"
3257 "cmpl %eax,(%esp)\n\t"
3258 "jnl .Llt_fallthru\n\t"
3260 "lea 0x8(%esp),%esp\n\t"
3263 /* jmp, but don't trust the assembler to choose the right jump */
3264 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3265 ".Llt_fallthru:\n\t"
3266 "lea 0x8(%esp),%esp\n\t"
3277 i386_emit_le_goto (int *offset_p
, int *size_p
)
3280 "cmpl %ebx,4(%esp)\n\t"
3282 "jne .Lle_fallthru\n\t"
3283 "cmpl %eax,(%esp)\n\t"
3284 "jnle .Lle_fallthru\n\t"
3286 "lea 0x8(%esp),%esp\n\t"
3289 /* jmp, but don't trust the assembler to choose the right jump */
3290 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3291 ".Lle_fallthru:\n\t"
3292 "lea 0x8(%esp),%esp\n\t"
3303 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3306 "cmpl %ebx,4(%esp)\n\t"
3308 "jne .Lgt_fallthru\n\t"
3309 "cmpl %eax,(%esp)\n\t"
3310 "jng .Lgt_fallthru\n\t"
3312 "lea 0x8(%esp),%esp\n\t"
3315 /* jmp, but don't trust the assembler to choose the right jump */
3316 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3317 ".Lgt_fallthru:\n\t"
3318 "lea 0x8(%esp),%esp\n\t"
3329 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3332 "cmpl %ebx,4(%esp)\n\t"
3334 "jne .Lge_fallthru\n\t"
3335 "cmpl %eax,(%esp)\n\t"
3336 "jnge .Lge_fallthru\n\t"
3338 "lea 0x8(%esp),%esp\n\t"
3341 /* jmp, but don't trust the assembler to choose the right jump */
3342 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3343 ".Lge_fallthru:\n\t"
3344 "lea 0x8(%esp),%esp\n\t"
3354 struct emit_ops i386_emit_ops
=
3362 i386_emit_rsh_signed
,
3363 i386_emit_rsh_unsigned
,
3371 i386_emit_less_signed
,
3372 i386_emit_less_unsigned
,
3376 i386_write_goto_address
,
3381 i386_emit_stack_flush
,
3384 i386_emit_stack_adjust
,
3385 i386_emit_int_call_1
,
3386 i386_emit_void_call_2
,
3396 static struct emit_ops
*
3400 if (is_64bit_tdesc ())
3401 return &amd64_emit_ops
;
3404 return &i386_emit_ops
;
/* The x86 target always supports range stepping (the vCont;r
   packet); report that to the core.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3413 /* This is initialized assuming an amd64 target.
3414 x86_arch_setup will correct it for i386 or amd64 targets. */
3416 struct linux_target_ops the_low_target
=
3419 x86_linux_regs_info
,
3420 x86_cannot_fetch_register
,
3421 x86_cannot_store_register
,
3422 NULL
, /* fetch_register */
3430 x86_supports_z_point_type
,
3433 x86_stopped_by_watchpoint
,
3434 x86_stopped_data_address
,
3435 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3436 native i386 case (no registers smaller than an xfer unit), and are not
3437 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3440 /* need to fix up i386 siginfo if host is amd64 */
3442 x86_linux_new_process
,
3443 x86_linux_new_thread
,
3444 x86_linux_prepare_to_resume
,
3445 x86_linux_process_qsupported
,
3446 x86_supports_tracepoints
,
3447 x86_get_thread_area
,
3448 x86_install_fast_tracepoint_jump_pad
,
3450 x86_get_min_fast_tracepoint_insn_len
,
3451 x86_supports_range_stepping
,
3455 initialize_low_arch (void)
3457 /* Initialize the Linux target descriptions. */
3459 init_registers_amd64_linux ();
3460 init_registers_amd64_avx_linux ();
3461 init_registers_amd64_avx512_linux ();
3462 init_registers_amd64_mpx_linux ();
3464 init_registers_x32_linux ();
3465 init_registers_x32_avx_linux ();
3466 init_registers_x32_avx512_linux ();
3468 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3469 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3470 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3472 init_registers_i386_linux ();
3473 init_registers_i386_mmx_linux ();
3474 init_registers_i386_avx_linux ();
3475 init_registers_i386_avx512_linux ();
3476 init_registers_i386_mpx_linux ();
3478 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3479 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3480 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3482 initialize_regsets_info (&x86_regsets_info
);