1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-avx512-linux.c. */
52 void init_registers_amd64_avx512_linux (void);
53 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
55 /* Defined in auto-generated file amd64-mpx-linux.c. */
56 void init_registers_amd64_mpx_linux (void);
57 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
59 /* Defined in auto-generated file x32-linux.c. */
60 void init_registers_x32_linux (void);
61 extern const struct target_desc
*tdesc_x32_linux
;
63 /* Defined in auto-generated file x32-avx-linux.c. */
64 void init_registers_x32_avx_linux (void);
65 extern const struct target_desc
*tdesc_x32_avx_linux
;
67 /* Defined in auto-generated file x32-avx512-linux.c. */
68 void init_registers_x32_avx512_linux (void);
69 extern const struct target_desc
*tdesc_x32_avx512_linux
;
73 /* Defined in auto-generated file i386-linux.c. */
74 void init_registers_i386_linux (void);
75 extern const struct target_desc
*tdesc_i386_linux
;
77 /* Defined in auto-generated file i386-mmx-linux.c. */
78 void init_registers_i386_mmx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mmx_linux
;
81 /* Defined in auto-generated file i386-avx-linux.c. */
82 void init_registers_i386_avx_linux (void);
83 extern const struct target_desc
*tdesc_i386_avx_linux
;
85 /* Defined in auto-generated file i386-avx512-linux.c. */
86 void init_registers_i386_avx512_linux (void);
87 extern const struct target_desc
*tdesc_i386_avx512_linux
;
89 /* Defined in auto-generated file i386-mpx-linux.c. */
90 void init_registers_i386_mpx_linux (void);
91 extern const struct target_desc
*tdesc_i386_mpx_linux
;
94 static struct target_desc
*tdesc_amd64_linux_no_xml
;
96 static struct target_desc
*tdesc_i386_linux_no_xml
;
99 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
100 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
102 /* Backward compatibility for gdb without XML support. */
104 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
105 <architecture>i386</architecture>\
106 <osabi>GNU/Linux</osabi>\
110 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
111 <architecture>i386:x86-64</architecture>\
112 <osabi>GNU/Linux</osabi>\
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
121 #ifndef PTRACE_GETREGSET
122 #define PTRACE_GETREGSET 0x4204
125 #ifndef PTRACE_SETREGSET
126 #define PTRACE_SETREGSET 0x4205
130 #ifndef PTRACE_GET_THREAD_AREA
131 #define PTRACE_GET_THREAD_AREA 25
134 /* This definition comes from prctl.h, but some kernels may not have it. */
135 #ifndef PTRACE_ARCH_PRCTL
136 #define PTRACE_ARCH_PRCTL 30
139 /* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
142 #define ARCH_SET_GS 0x1001
143 #define ARCH_SET_FS 0x1002
144 #define ARCH_GET_FS 0x1003
145 #define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct x86_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
182 static const int x86_64_regmap
[] =
184 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
185 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
186 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
187 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
188 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
189 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
197 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
198 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
201 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1, -1, -1, -1, -1, -1, -1, -1
209 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
210 #define X86_64_USER_REGS (GS + 1)
212 #else /* ! __x86_64__ */
214 /* Mapping between the general-purpose registers in `struct user'
215 format and GDB's register array layout. */
216 static /*const*/ int i386_regmap
[] =
218 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
219 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
220 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
221 DS
* 4, ES
* 4, FS
* 4, GS
* 4
224 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
232 /* Returns true if the current inferior belongs to a x86-64 process,
236 is_64bit_tdesc (void)
238 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
240 return register_size (regcache
->tdesc
, 0) == 8;
246 /* Called by libthread_db. */
249 ps_get_thread_area (const struct ps_prochandle
*ph
,
250 lwpid_t lwpid
, int idx
, void **base
)
253 int use_64bit
= is_64bit_tdesc ();
260 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
264 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
275 unsigned int desc
[4];
277 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
278 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
281 /* Ensure we properly extend the value to 64-bits for x86_64. */
282 *base
= (void *) (uintptr_t) desc
[1];
287 /* Get the thread area address. This is used to recognize which
288 thread is which when tracing with the in-process agent library. We
289 don't read anything from the address, and treat it as opaque; it's
290 the address itself that we assume is unique per-thread. */
293 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
296 int use_64bit
= is_64bit_tdesc ();
301 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
303 *addr
= (CORE_ADDR
) (uintptr_t) base
;
312 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
313 struct thread_info
*thr
= get_lwp_thread (lwp
);
314 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
315 unsigned int desc
[4];
317 const int reg_thread_area
= 3; /* bits to scale down register value. */
320 collect_register_by_name (regcache
, "gs", &gs
);
322 idx
= gs
>> reg_thread_area
;
324 if (ptrace (PTRACE_GET_THREAD_AREA
,
326 (void *) (long) idx
, (unsigned long) &desc
) < 0)
337 x86_cannot_store_register (int regno
)
340 if (is_64bit_tdesc ())
344 return regno
>= I386_NUM_REGS
;
348 x86_cannot_fetch_register (int regno
)
351 if (is_64bit_tdesc ())
355 return regno
>= I386_NUM_REGS
;
359 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
364 if (register_size (regcache
->tdesc
, 0) == 8)
366 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
367 if (x86_64_regmap
[i
] != -1)
368 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
372 /* 32-bit inferior registers need to be zero-extended.
373 Callers would read uninitialized memory otherwise. */
374 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
377 for (i
= 0; i
< I386_NUM_REGS
; i
++)
378 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
380 collect_register_by_name (regcache
, "orig_eax",
381 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
385 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
390 if (register_size (regcache
->tdesc
, 0) == 8)
392 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
393 if (x86_64_regmap
[i
] != -1)
394 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
399 for (i
= 0; i
< I386_NUM_REGS
; i
++)
400 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
402 supply_register_by_name (regcache
, "orig_eax",
403 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Transfer the i387 FP state between REGCACHE and the various ptrace
   buffer layouts.  On amd64 the fpregset is an fxsave area; on i386
   it is the older fsave layout, with the fxsave layout available
   separately via PTRACE_GETFPXREGS.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

/* Transfer the XSAVE extended state between REGCACHE and BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
454 /* ??? The non-biarch i386 case stores all the i387 regs twice.
455 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
456 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
457 doesn't work. IWBN to avoid the duplication in the case where it
458 does work. Maybe the arch_setup routine could check whether it works
459 and update the supported regsets accordingly. */
461 static struct regset_info x86_regsets
[] =
463 #ifdef HAVE_PTRACE_GETREGS
464 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
466 x86_fill_gregset
, x86_store_gregset
},
467 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
468 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
470 # ifdef HAVE_PTRACE_GETFPXREGS
471 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
473 x86_fill_fpxregset
, x86_store_fpxregset
},
476 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
478 x86_fill_fpregset
, x86_store_fpregset
},
479 #endif /* HAVE_PTRACE_GETREGS */
480 { 0, 0, 0, -1, -1, NULL
, NULL
}
484 x86_get_pc (struct regcache
*regcache
)
486 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
491 collect_register_by_name (regcache
, "rip", &pc
);
492 return (CORE_ADDR
) pc
;
497 collect_register_by_name (regcache
, "eip", &pc
);
498 return (CORE_ADDR
) pc
;
503 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
505 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
509 unsigned long newpc
= pc
;
510 supply_register_by_name (regcache
, "rip", &newpc
);
514 unsigned int newpc
= pc
;
515 supply_register_by_name (regcache
, "eip", &newpc
);
519 static const unsigned char x86_breakpoint
[] = { 0xCC };
520 #define x86_breakpoint_len 1
523 x86_breakpoint_at (CORE_ADDR pc
)
527 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  Used as the ptrace PEEKUSER/POKEUSER address for debug
   registers.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
546 /* Support for debug registers. */
549 x86_linux_dr_get (ptid_t ptid
, int regnum
)
554 tid
= ptid_get_lwp (ptid
);
557 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
559 error ("Couldn't read debug register");
565 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
569 tid
= ptid_get_lwp (ptid
);
572 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
574 error ("Couldn't write debug register");
578 update_debug_registers_callback (struct inferior_list_entry
*entry
,
581 struct thread_info
*thr
= (struct thread_info
*) entry
;
582 struct lwp_info
*lwp
= get_thread_lwp (thr
);
583 int pid
= *(int *) pid_p
;
585 /* Only update the threads of this process. */
586 if (pid_of (thr
) == pid
)
588 /* The actual update is done later just before resuming the lwp,
589 we just mark that the registers need updating. */
590 lwp
->arch_private
->debug_registers_changed
= 1;
592 /* If the lwp isn't stopped, force it to momentarily pause, so
593 we can update its debug registers. */
595 linux_stop_lwp (lwp
);
601 /* Update the inferior's debug register REGNUM from STATE. */
604 x86_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
606 /* Only update the threads of this process. */
607 int pid
= pid_of (current_thread
);
609 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
611 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
614 /* Return the inferior's debug register REGNUM. */
617 x86_dr_low_get_addr (int regnum
)
619 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
621 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
624 /* Update the inferior's DR7 debug control register from STATE. */
627 x86_dr_low_set_control (unsigned long control
)
629 /* Only update the threads of this process. */
630 int pid
= pid_of (current_thread
);
632 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
635 /* Return the inferior's DR7 debug control register. */
638 x86_dr_low_get_control (void)
640 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
643 /* Get the value of the DR6 debug status register from the inferior
644 and record it in STATE. */
647 x86_dr_low_get_status (void)
649 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
652 /* Low-level function vector. */
653 struct x86_dr_low_type x86_dr_low
=
655 x86_dr_low_set_control
,
658 x86_dr_low_get_status
,
659 x86_dr_low_get_control
,
663 /* Breakpoint/Watchpoint support. */
666 x86_supports_z_point_type (char z_type
)
672 case Z_PACKET_WRITE_WP
:
673 case Z_PACKET_ACCESS_WP
:
681 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
682 int size
, struct raw_breakpoint
*bp
)
684 struct process_info
*proc
= current_process ();
688 case raw_bkpt_type_sw
:
689 return insert_memory_breakpoint (bp
);
691 case raw_bkpt_type_hw
:
692 case raw_bkpt_type_write_wp
:
693 case raw_bkpt_type_access_wp
:
695 enum target_hw_bp_type hw_type
696 = raw_bkpt_type_to_target_hw_bp_type (type
);
697 struct x86_debug_reg_state
*state
698 = &proc
->priv
->arch_private
->debug_reg_state
;
700 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
710 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
711 int size
, struct raw_breakpoint
*bp
)
713 struct process_info
*proc
= current_process ();
717 case raw_bkpt_type_sw
:
718 return remove_memory_breakpoint (bp
);
720 case raw_bkpt_type_hw
:
721 case raw_bkpt_type_write_wp
:
722 case raw_bkpt_type_access_wp
:
724 enum target_hw_bp_type hw_type
725 = raw_bkpt_type_to_target_hw_bp_type (type
);
726 struct x86_debug_reg_state
*state
727 = &proc
->priv
->arch_private
->debug_reg_state
;
729 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
738 x86_stopped_by_watchpoint (void)
740 struct process_info
*proc
= current_process ();
741 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
745 x86_stopped_data_address (void)
747 struct process_info
*proc
= current_process ();
749 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
755 /* Called when a new process is created. */
757 static struct arch_process_info
*
758 x86_linux_new_process (void)
760 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
762 x86_low_init_dregs (&info
->debug_reg_state
);
767 /* Called when a new thread is detected. */
769 static struct arch_lwp_info
*
770 x86_linux_new_thread (void)
772 struct arch_lwp_info
*info
= XCNEW (struct arch_lwp_info
);
774 info
->debug_registers_changed
= 1;
779 /* See nat/x86-dregs.h. */
781 struct x86_debug_reg_state
*
782 x86_debug_reg_state (pid_t pid
)
784 struct process_info
*proc
= find_process_pid (pid
);
786 return &proc
->priv
->arch_private
->debug_reg_state
;
789 /* Called when resuming a thread.
790 If the debug regs have changed, update the thread's copies. */
793 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
795 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
796 int clear_status
= 0;
798 if (lwp
->arch_private
->debug_registers_changed
)
800 struct x86_debug_reg_state
*state
801 = x86_debug_reg_state (ptid_get_pid (ptid
));
804 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
806 ALL_DEBUG_ADDRESS_REGISTERS (i
)
807 if (state
->dr_ref_count
[i
] > 0)
809 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
811 /* If we're setting a watchpoint, any change the inferior
812 had done itself to the debug registers needs to be
813 discarded, otherwise, x86_dr_stopped_data_address can
818 if (state
->dr_control_mirror
!= 0)
819 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
821 lwp
->arch_private
->debug_registers_changed
= 0;
824 if (clear_status
|| lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
825 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
828 /* When GDBSERVER is built as a 64-bit application on linux, the
829 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
830 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
831 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
832 conversion in-place ourselves. */
834 /* These types below (compat_*) define a siginfo type that is layout
835 compatible with the siginfo type exported by the 32-bit userspace
840 typedef int compat_int_t
;
841 typedef unsigned int compat_uptr_t
;
843 typedef int compat_time_t
;
844 typedef int compat_timer_t
;
845 typedef int compat_clock_t
;
847 struct compat_timeval
849 compat_time_t tv_sec
;
853 typedef union compat_sigval
855 compat_int_t sival_int
;
856 compat_uptr_t sival_ptr
;
859 typedef struct compat_siginfo
867 int _pad
[((128 / sizeof (int)) - 3)];
876 /* POSIX.1b timers */
881 compat_sigval_t _sigval
;
884 /* POSIX.1b signals */
889 compat_sigval_t _sigval
;
898 compat_clock_t _utime
;
899 compat_clock_t _stime
;
902 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
917 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
918 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
920 typedef struct compat_x32_siginfo
928 int _pad
[((128 / sizeof (int)) - 3)];
937 /* POSIX.1b timers */
942 compat_sigval_t _sigval
;
945 /* POSIX.1b signals */
950 compat_sigval_t _sigval
;
959 compat_x32_clock_t _utime
;
960 compat_x32_clock_t _stime
;
963 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
976 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
978 #define cpt_si_pid _sifields._kill._pid
979 #define cpt_si_uid _sifields._kill._uid
980 #define cpt_si_timerid _sifields._timer._tid
981 #define cpt_si_overrun _sifields._timer._overrun
982 #define cpt_si_status _sifields._sigchld._status
983 #define cpt_si_utime _sifields._sigchld._utime
984 #define cpt_si_stime _sifields._sigchld._stime
985 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
986 #define cpt_si_addr _sifields._sigfault._addr
987 #define cpt_si_band _sifields._sigpoll._band
988 #define cpt_si_fd _sifields._sigpoll._fd
990 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
991 In their place is si_timer1,si_timer2. */
993 #define si_timerid si_timer1
996 #define si_overrun si_timer2
1000 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
1002 memset (to
, 0, sizeof (*to
));
1004 to
->si_signo
= from
->si_signo
;
1005 to
->si_errno
= from
->si_errno
;
1006 to
->si_code
= from
->si_code
;
1008 if (to
->si_code
== SI_TIMER
)
1010 to
->cpt_si_timerid
= from
->si_timerid
;
1011 to
->cpt_si_overrun
= from
->si_overrun
;
1012 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1014 else if (to
->si_code
== SI_USER
)
1016 to
->cpt_si_pid
= from
->si_pid
;
1017 to
->cpt_si_uid
= from
->si_uid
;
1019 else if (to
->si_code
< 0)
1021 to
->cpt_si_pid
= from
->si_pid
;
1022 to
->cpt_si_uid
= from
->si_uid
;
1023 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1027 switch (to
->si_signo
)
1030 to
->cpt_si_pid
= from
->si_pid
;
1031 to
->cpt_si_uid
= from
->si_uid
;
1032 to
->cpt_si_status
= from
->si_status
;
1033 to
->cpt_si_utime
= from
->si_utime
;
1034 to
->cpt_si_stime
= from
->si_stime
;
1040 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1043 to
->cpt_si_band
= from
->si_band
;
1044 to
->cpt_si_fd
= from
->si_fd
;
1047 to
->cpt_si_pid
= from
->si_pid
;
1048 to
->cpt_si_uid
= from
->si_uid
;
1049 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1056 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1058 memset (to
, 0, sizeof (*to
));
1060 to
->si_signo
= from
->si_signo
;
1061 to
->si_errno
= from
->si_errno
;
1062 to
->si_code
= from
->si_code
;
1064 if (to
->si_code
== SI_TIMER
)
1066 to
->si_timerid
= from
->cpt_si_timerid
;
1067 to
->si_overrun
= from
->cpt_si_overrun
;
1068 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1070 else if (to
->si_code
== SI_USER
)
1072 to
->si_pid
= from
->cpt_si_pid
;
1073 to
->si_uid
= from
->cpt_si_uid
;
1075 else if (to
->si_code
< 0)
1077 to
->si_pid
= from
->cpt_si_pid
;
1078 to
->si_uid
= from
->cpt_si_uid
;
1079 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1083 switch (to
->si_signo
)
1086 to
->si_pid
= from
->cpt_si_pid
;
1087 to
->si_uid
= from
->cpt_si_uid
;
1088 to
->si_status
= from
->cpt_si_status
;
1089 to
->si_utime
= from
->cpt_si_utime
;
1090 to
->si_stime
= from
->cpt_si_stime
;
1096 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1099 to
->si_band
= from
->cpt_si_band
;
1100 to
->si_fd
= from
->cpt_si_fd
;
1103 to
->si_pid
= from
->cpt_si_pid
;
1104 to
->si_uid
= from
->cpt_si_uid
;
1105 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1112 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1115 memset (to
, 0, sizeof (*to
));
1117 to
->si_signo
= from
->si_signo
;
1118 to
->si_errno
= from
->si_errno
;
1119 to
->si_code
= from
->si_code
;
1121 if (to
->si_code
== SI_TIMER
)
1123 to
->cpt_si_timerid
= from
->si_timerid
;
1124 to
->cpt_si_overrun
= from
->si_overrun
;
1125 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1127 else if (to
->si_code
== SI_USER
)
1129 to
->cpt_si_pid
= from
->si_pid
;
1130 to
->cpt_si_uid
= from
->si_uid
;
1132 else if (to
->si_code
< 0)
1134 to
->cpt_si_pid
= from
->si_pid
;
1135 to
->cpt_si_uid
= from
->si_uid
;
1136 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1140 switch (to
->si_signo
)
1143 to
->cpt_si_pid
= from
->si_pid
;
1144 to
->cpt_si_uid
= from
->si_uid
;
1145 to
->cpt_si_status
= from
->si_status
;
1146 to
->cpt_si_utime
= from
->si_utime
;
1147 to
->cpt_si_stime
= from
->si_stime
;
1153 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1156 to
->cpt_si_band
= from
->si_band
;
1157 to
->cpt_si_fd
= from
->si_fd
;
1160 to
->cpt_si_pid
= from
->si_pid
;
1161 to
->cpt_si_uid
= from
->si_uid
;
1162 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1169 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1170 compat_x32_siginfo_t
*from
)
1172 memset (to
, 0, sizeof (*to
));
1174 to
->si_signo
= from
->si_signo
;
1175 to
->si_errno
= from
->si_errno
;
1176 to
->si_code
= from
->si_code
;
1178 if (to
->si_code
== SI_TIMER
)
1180 to
->si_timerid
= from
->cpt_si_timerid
;
1181 to
->si_overrun
= from
->cpt_si_overrun
;
1182 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1184 else if (to
->si_code
== SI_USER
)
1186 to
->si_pid
= from
->cpt_si_pid
;
1187 to
->si_uid
= from
->cpt_si_uid
;
1189 else if (to
->si_code
< 0)
1191 to
->si_pid
= from
->cpt_si_pid
;
1192 to
->si_uid
= from
->cpt_si_uid
;
1193 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1197 switch (to
->si_signo
)
1200 to
->si_pid
= from
->cpt_si_pid
;
1201 to
->si_uid
= from
->cpt_si_uid
;
1202 to
->si_status
= from
->cpt_si_status
;
1203 to
->si_utime
= from
->cpt_si_utime
;
1204 to
->si_stime
= from
->cpt_si_stime
;
1210 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1213 to
->si_band
= from
->cpt_si_band
;
1214 to
->si_fd
= from
->cpt_si_fd
;
1217 to
->si_pid
= from
->cpt_si_pid
;
1218 to
->si_uid
= from
->cpt_si_uid
;
1219 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1225 #endif /* __x86_64__ */
1227 /* Convert a native/host siginfo object, into/from the siginfo in the
1228 layout of the inferiors' architecture. Returns true if any
1229 conversion was done; false otherwise. If DIRECTION is 1, then copy
1230 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1234 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1237 unsigned int machine
;
1238 int tid
= lwpid_of (current_thread
);
1239 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1241 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1242 if (!is_64bit_tdesc ())
1244 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1247 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1249 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1253 /* No fixup for native x32 GDB. */
1254 else if (!is_elf64
&& sizeof (void *) == 8)
1256 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1259 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1262 siginfo_from_compat_x32_siginfo (native
,
1263 (struct compat_x32_siginfo
*) inf
);
1274 /* Format of XSAVE extended state is:
1277 fxsave_bytes[0..463]
1278 sw_usable_bytes[464..511]
1279 xstate_hdr_bytes[512..575]
1284 Same memory layout will be used for the coredump NT_X86_XSTATE
1285 representing the XSAVE extended state registers.
1287 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1288 extended state mask, which is the same as the extended control register
1289 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1290 together with the mask saved in the xstate_hdr_bytes to determine what
1291 states the processor/OS supports and what state, used or initialized,
1292 the process/thread is in. */
1293 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1295 /* Does the current host support the GETFPXREGS request? The header
1296 file may or may not define it, and even if it is defined, the
1297 kernel will return EIO if it's running on a pre-SSE processor. */
1298 int have_ptrace_getfpxregs
=
1299 #ifdef HAVE_PTRACE_GETFPXREGS
1306 /* Does the current host support PTRACE_GETREGSET? */
1307 static int have_ptrace_getregset
= -1;
1309 /* Get Linux/x86 target description from running target. */
1311 static const struct target_desc
*
1312 x86_linux_read_description (void)
1314 unsigned int machine
;
1318 static uint64_t xcr0
;
1319 struct regset_info
*regset
;
1321 tid
= lwpid_of (current_thread
);
1323 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1325 if (sizeof (void *) == 4)
1328 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1330 else if (machine
== EM_X86_64
)
1331 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1335 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1336 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1338 elf_fpxregset_t fpxregs
;
1340 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1342 have_ptrace_getfpxregs
= 0;
1343 have_ptrace_getregset
= 0;
1344 return tdesc_i386_mmx_linux
;
1347 have_ptrace_getfpxregs
= 1;
1353 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1355 /* Don't use XML. */
1357 if (machine
== EM_X86_64
)
1358 return tdesc_amd64_linux_no_xml
;
1361 return tdesc_i386_linux_no_xml
;
1364 if (have_ptrace_getregset
== -1)
1366 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1369 iov
.iov_base
= xstateregs
;
1370 iov
.iov_len
= sizeof (xstateregs
);
1372 /* Check if PTRACE_GETREGSET works. */
1373 if (ptrace (PTRACE_GETREGSET
, tid
,
1374 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1375 have_ptrace_getregset
= 0;
1378 have_ptrace_getregset
= 1;
1380 /* Get XCR0 from XSAVE extended state. */
1381 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1382 / sizeof (uint64_t))];
1384 /* Use PTRACE_GETREGSET if it is available. */
1385 for (regset
= x86_regsets
;
1386 regset
->fill_function
!= NULL
; regset
++)
1387 if (regset
->get_request
== PTRACE_GETREGSET
)
1388 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1389 else if (regset
->type
!= GENERAL_REGS
)
1394 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1395 xcr0_features
= (have_ptrace_getregset
1396 && (xcr0
& X86_XSTATE_ALL_MASK
));
1401 if (machine
== EM_X86_64
)
1408 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1410 case X86_XSTATE_AVX512_MASK
:
1411 return tdesc_amd64_avx512_linux
;
1413 case X86_XSTATE_MPX_MASK
:
1414 return tdesc_amd64_mpx_linux
;
1416 case X86_XSTATE_AVX_MASK
:
1417 return tdesc_amd64_avx_linux
;
1420 return tdesc_amd64_linux
;
1424 return tdesc_amd64_linux
;
1430 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1432 case X86_XSTATE_AVX512_MASK
:
1433 return tdesc_x32_avx512_linux
;
1435 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1436 case X86_XSTATE_AVX_MASK
:
1437 return tdesc_x32_avx_linux
;
1440 return tdesc_x32_linux
;
1444 return tdesc_x32_linux
;
1452 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1454 case (X86_XSTATE_AVX512_MASK
):
1455 return tdesc_i386_avx512_linux
;
1457 case (X86_XSTATE_MPX_MASK
):
1458 return tdesc_i386_mpx_linux
;
1460 case (X86_XSTATE_AVX_MASK
):
1461 return tdesc_i386_avx_linux
;
1464 return tdesc_i386_linux
;
1468 return tdesc_i386_linux
;
1471 gdb_assert_not_reached ("failed to return tdesc");
1474 /* Callback for find_inferior. Stops iteration when a thread with a
1475 given PID is found. */
1478 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1480 int pid
= *(int *) data
;
1482 return (ptid_get_pid (entry
->id
) == pid
);
1485 /* Callback for for_each_inferior. Calls the arch_setup routine for
1489 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1491 int pid
= ptid_get_pid (entry
->id
);
1493 /* Look up any thread of this processes. */
1495 = (struct thread_info
*) find_inferior (&all_threads
,
1496 same_process_callback
, &pid
);
1498 the_low_target
.arch_setup ();
1501 /* Update all the target description of all processes; a new GDB
1502 connected, and it may or not support xml target descriptions. */
1505 x86_linux_update_xmltarget (void)
1507 struct thread_info
*saved_thread
= current_thread
;
1509 /* Before changing the register cache's internal layout, flush the
1510 contents of the current valid caches back to the threads, and
1511 release the current regcache objects. */
1512 regcache_release ();
1514 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1516 current_thread
= saved_thread
;
1519 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1520 PTRACE_GETREGSET. */
1523 x86_linux_process_qsupported (const char *query
)
1525 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1526 with "i386" in qSupported query, it supports x86 XML target
1529 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1531 char *copy
= xstrdup (query
+ 13);
1534 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1536 if (strcmp (p
, "i386") == 0)
1546 x86_linux_update_xmltarget ();
1549 /* Common for x86/x86-64. */
1551 static struct regsets_info x86_regsets_info
=
1553 x86_regsets
, /* regsets */
1554 0, /* num_regsets */
1555 NULL
, /* disabled_regsets */
1559 static struct regs_info amd64_linux_regs_info
=
1561 NULL
, /* regset_bitmap */
1562 NULL
, /* usrregs_info */
1566 static struct usrregs_info i386_linux_usrregs_info
=
1572 static struct regs_info i386_linux_regs_info
=
1574 NULL
, /* regset_bitmap */
1575 &i386_linux_usrregs_info
,
1579 const struct regs_info
*
1580 x86_linux_regs_info (void)
1583 if (is_64bit_tdesc ())
1584 return &amd64_linux_regs_info
;
1587 return &i386_linux_regs_info
;
1590 /* Initialize the target description for the architecture of the
1594 x86_arch_setup (void)
1596 current_process ()->tdesc
= x86_linux_read_description ();
/* This target supports tracepoints unconditionally.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1606 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1608 write_inferior_memory (*to
, buf
, len
);
/* Decode the hex-byte string OP (e.g. "48 83 ec 18") into BUF and
   return the number of bytes written.  Parsing stops at the first
   token that is not a hex number.

   OP is const-qualified: every caller passes a string literal, and the
   function only reads through it (writing would be undefined
   behavior).  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No digits consumed: end of the opcode string.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1634 /* Build a jump pad that saves registers and calls a collection
1635 function. Writes a jump instruction to the jump pad to
1636 JJUMPAD_INSN. The caller is responsible to write it in at the
1637 tracepoint address. */
1640 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1641 CORE_ADDR collector
,
1644 CORE_ADDR
*jump_entry
,
1645 CORE_ADDR
*trampoline
,
1646 ULONGEST
*trampoline_size
,
1647 unsigned char *jjump_pad_insn
,
1648 ULONGEST
*jjump_pad_insn_size
,
1649 CORE_ADDR
*adjusted_insn_addr
,
1650 CORE_ADDR
*adjusted_insn_addr_end
,
1653 unsigned char buf
[40];
1657 CORE_ADDR buildaddr
= *jump_entry
;
1659 /* Build the jump pad. */
1661 /* First, do tracepoint data collection. Save registers. */
1663 /* Need to ensure stack pointer saved first. */
1664 buf
[i
++] = 0x54; /* push %rsp */
1665 buf
[i
++] = 0x55; /* push %rbp */
1666 buf
[i
++] = 0x57; /* push %rdi */
1667 buf
[i
++] = 0x56; /* push %rsi */
1668 buf
[i
++] = 0x52; /* push %rdx */
1669 buf
[i
++] = 0x51; /* push %rcx */
1670 buf
[i
++] = 0x53; /* push %rbx */
1671 buf
[i
++] = 0x50; /* push %rax */
1672 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1673 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1674 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1675 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1676 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1677 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1678 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1679 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1680 buf
[i
++] = 0x9c; /* pushfq */
1681 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1683 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1684 i
+= sizeof (unsigned long);
1685 buf
[i
++] = 0x57; /* push %rdi */
1686 append_insns (&buildaddr
, i
, buf
);
1688 /* Stack space for the collecting_t object. */
1690 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1691 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1692 memcpy (buf
+ i
, &tpoint
, 8);
1694 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1695 i
+= push_opcode (&buf
[i
],
1696 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1697 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1698 append_insns (&buildaddr
, i
, buf
);
1702 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1703 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1705 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1706 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1707 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1708 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1709 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1710 append_insns (&buildaddr
, i
, buf
);
1712 /* Set up the gdb_collect call. */
1713 /* At this point, (stack pointer + 0x18) is the base of our saved
1717 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1718 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1720 /* tpoint address may be 64-bit wide. */
1721 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1722 memcpy (buf
+ i
, &tpoint
, 8);
1724 append_insns (&buildaddr
, i
, buf
);
1726 /* The collector function being in the shared library, may be
1727 >31-bits away off the jump pad. */
1729 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1730 memcpy (buf
+ i
, &collector
, 8);
1732 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1733 append_insns (&buildaddr
, i
, buf
);
1735 /* Clear the spin-lock. */
1737 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1738 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1739 memcpy (buf
+ i
, &lockaddr
, 8);
1741 append_insns (&buildaddr
, i
, buf
);
1743 /* Remove stack that had been used for the collect_t object. */
1745 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1746 append_insns (&buildaddr
, i
, buf
);
1748 /* Restore register state. */
1750 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1754 buf
[i
++] = 0x9d; /* popfq */
1755 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1756 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1757 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1758 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1759 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1760 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1761 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1762 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1763 buf
[i
++] = 0x58; /* pop %rax */
1764 buf
[i
++] = 0x5b; /* pop %rbx */
1765 buf
[i
++] = 0x59; /* pop %rcx */
1766 buf
[i
++] = 0x5a; /* pop %rdx */
1767 buf
[i
++] = 0x5e; /* pop %rsi */
1768 buf
[i
++] = 0x5f; /* pop %rdi */
1769 buf
[i
++] = 0x5d; /* pop %rbp */
1770 buf
[i
++] = 0x5c; /* pop %rsp */
1771 append_insns (&buildaddr
, i
, buf
);
1773 /* Now, adjust the original instruction to execute in the jump
1775 *adjusted_insn_addr
= buildaddr
;
1776 relocate_instruction (&buildaddr
, tpaddr
);
1777 *adjusted_insn_addr_end
= buildaddr
;
1779 /* Finally, write a jump back to the program. */
1781 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1782 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1785 "E.Jump back from jump pad too far from tracepoint "
1786 "(offset 0x%" PRIx64
" > int32).", loffset
);
1790 offset
= (int) loffset
;
1791 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1792 memcpy (buf
+ 1, &offset
, 4);
1793 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1795 /* The jump pad is now built. Wire in a jump to our jump pad. This
1796 is always done last (by our caller actually), so that we can
1797 install fast tracepoints with threads running. This relies on
1798 the agent's atomic write support. */
1799 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1800 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1803 "E.Jump pad too far from tracepoint "
1804 "(offset 0x%" PRIx64
" > int32).", loffset
);
1808 offset
= (int) loffset
;
1810 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1811 memcpy (buf
+ 1, &offset
, 4);
1812 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1813 *jjump_pad_insn_size
= sizeof (jump_insn
);
1815 /* Return the end address of our pad. */
1816 *jump_entry
= buildaddr
;
1821 #endif /* __x86_64__ */
1823 /* Build a jump pad that saves registers and calls a collection
1824 function. Writes a jump instruction to the jump pad to
1825 JJUMPAD_INSN. The caller is responsible to write it in at the
1826 tracepoint address. */
1829 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1830 CORE_ADDR collector
,
1833 CORE_ADDR
*jump_entry
,
1834 CORE_ADDR
*trampoline
,
1835 ULONGEST
*trampoline_size
,
1836 unsigned char *jjump_pad_insn
,
1837 ULONGEST
*jjump_pad_insn_size
,
1838 CORE_ADDR
*adjusted_insn_addr
,
1839 CORE_ADDR
*adjusted_insn_addr_end
,
1842 unsigned char buf
[0x100];
1844 CORE_ADDR buildaddr
= *jump_entry
;
1846 /* Build the jump pad. */
1848 /* First, do tracepoint data collection. Save registers. */
1850 buf
[i
++] = 0x60; /* pushad */
1851 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1852 *((int *)(buf
+ i
)) = (int) tpaddr
;
1854 buf
[i
++] = 0x9c; /* pushf */
1855 buf
[i
++] = 0x1e; /* push %ds */
1856 buf
[i
++] = 0x06; /* push %es */
1857 buf
[i
++] = 0x0f; /* push %fs */
1859 buf
[i
++] = 0x0f; /* push %gs */
1861 buf
[i
++] = 0x16; /* push %ss */
1862 buf
[i
++] = 0x0e; /* push %cs */
1863 append_insns (&buildaddr
, i
, buf
);
1865 /* Stack space for the collecting_t object. */
1867 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1869 /* Build the object. */
1870 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1871 memcpy (buf
+ i
, &tpoint
, 4);
1873 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1875 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1876 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1877 append_insns (&buildaddr
, i
, buf
);
1879 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1880 If we cared for it, this could be using xchg alternatively. */
1883 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1884 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1886 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1888 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1889 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1890 append_insns (&buildaddr
, i
, buf
);
1893 /* Set up arguments to the gdb_collect call. */
1895 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1896 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1897 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1898 append_insns (&buildaddr
, i
, buf
);
1901 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1902 append_insns (&buildaddr
, i
, buf
);
1905 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1906 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1908 append_insns (&buildaddr
, i
, buf
);
1910 buf
[0] = 0xe8; /* call <reladdr> */
1911 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1912 memcpy (buf
+ 1, &offset
, 4);
1913 append_insns (&buildaddr
, 5, buf
);
1914 /* Clean up after the call. */
1915 buf
[0] = 0x83; /* add $0x8,%esp */
1918 append_insns (&buildaddr
, 3, buf
);
1921 /* Clear the spin-lock. This would need the LOCK prefix on older
1924 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1925 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1926 memcpy (buf
+ i
, &lockaddr
, 4);
1928 append_insns (&buildaddr
, i
, buf
);
1931 /* Remove stack that had been used for the collect_t object. */
1933 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1934 append_insns (&buildaddr
, i
, buf
);
1937 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1940 buf
[i
++] = 0x17; /* pop %ss */
1941 buf
[i
++] = 0x0f; /* pop %gs */
1943 buf
[i
++] = 0x0f; /* pop %fs */
1945 buf
[i
++] = 0x07; /* pop %es */
1946 buf
[i
++] = 0x1f; /* pop %ds */
1947 buf
[i
++] = 0x9d; /* popf */
1948 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1951 buf
[i
++] = 0x61; /* popad */
1952 append_insns (&buildaddr
, i
, buf
);
1954 /* Now, adjust the original instruction to execute in the jump
1956 *adjusted_insn_addr
= buildaddr
;
1957 relocate_instruction (&buildaddr
, tpaddr
);
1958 *adjusted_insn_addr_end
= buildaddr
;
1960 /* Write the jump back to the program. */
1961 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1962 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1963 memcpy (buf
+ 1, &offset
, 4);
1964 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1966 /* The jump pad is now built. Wire in a jump to our jump pad. This
1967 is always done last (by our caller actually), so that we can
1968 install fast tracepoints with threads running. This relies on
1969 the agent's atomic write support. */
1972 /* Create a trampoline. */
1973 *trampoline_size
= sizeof (jump_insn
);
1974 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1976 /* No trampoline space available. */
1978 "E.Cannot allocate trampoline space needed for fast "
1979 "tracepoints on 4-byte instructions.");
1983 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1984 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1985 memcpy (buf
+ 1, &offset
, 4);
1986 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1988 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1989 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1990 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1991 memcpy (buf
+ 2, &offset
, 2);
1992 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1993 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1997 /* Else use a 32-bit relative jump instruction. */
1998 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1999 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
2000 memcpy (buf
+ 1, &offset
, 4);
2001 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
2002 *jjump_pad_insn_size
= sizeof (jump_insn
);
2005 /* Return the end address of our pad. */
2006 *jump_entry
= buildaddr
;
2012 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2013 CORE_ADDR collector
,
2016 CORE_ADDR
*jump_entry
,
2017 CORE_ADDR
*trampoline
,
2018 ULONGEST
*trampoline_size
,
2019 unsigned char *jjump_pad_insn
,
2020 ULONGEST
*jjump_pad_insn_size
,
2021 CORE_ADDR
*adjusted_insn_addr
,
2022 CORE_ADDR
*adjusted_insn_addr_end
,
2026 if (is_64bit_tdesc ())
2027 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2028 collector
, lockaddr
,
2029 orig_size
, jump_entry
,
2030 trampoline
, trampoline_size
,
2032 jjump_pad_insn_size
,
2034 adjusted_insn_addr_end
,
2038 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2039 collector
, lockaddr
,
2040 orig_size
, jump_entry
,
2041 trampoline
, trampoline_size
,
2043 jjump_pad_insn_size
,
2045 adjusted_insn_addr_end
,
2049 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2053 x86_get_min_fast_tracepoint_insn_len (void)
2055 static int warned_about_fast_tracepoints
= 0;
2058 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2059 used for fast tracepoints. */
2060 if (is_64bit_tdesc ())
2064 if (agent_loaded_p ())
2066 char errbuf
[IPA_BUFSIZ
];
2070 /* On x86, if trampolines are available, then 4-byte jump instructions
2071 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2072 with a 4-byte offset are used instead. */
2073 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2077 /* GDB has no channel to explain to user why a shorter fast
2078 tracepoint is not possible, but at least make GDBserver
2079 mention that something has gone awry. */
2080 if (!warned_about_fast_tracepoints
)
2082 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2083 warned_about_fast_tracepoints
= 1;
2090 /* Indicate that the minimum length is currently unknown since the IPA
2091 has not loaded yet. */
2097 add_insns (unsigned char *start
, int len
)
2099 CORE_ADDR buildaddr
= current_insn_ptr
;
2102 debug_printf ("Adding %d bytes of insn at %s\n",
2103 len
, paddress (buildaddr
));
2105 append_insns (&buildaddr
, len
, start
);
2106 current_insn_ptr
= buildaddr
;
2109 /* Our general strategy for emitting code is to avoid specifying raw
2110 bytes whenever possible, and instead copy a block of inline asm
2111 that is embedded in the function. This is a little messy, because
2112 we need to keep the compiler from discarding what looks like dead
2113 code, plus suppress various warnings. */
2115 #define EMIT_ASM(NAME, INSNS) \
2118 extern unsigned char start_ ## NAME, end_ ## NAME; \
2119 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2120 __asm__ ("jmp end_" #NAME "\n" \
2121 "\t" "start_" #NAME ":" \
2123 "\t" "end_" #NAME ":"); \
2128 #define EMIT_ASM32(NAME,INSNS) \
2131 extern unsigned char start_ ## NAME, end_ ## NAME; \
2132 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2133 __asm__ (".code32\n" \
2134 "\t" "jmp end_" #NAME "\n" \
2135 "\t" "start_" #NAME ":\n" \
2137 "\t" "end_" #NAME ":\n" \
2143 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2150 amd64_emit_prologue (void)
2152 EMIT_ASM (amd64_prologue
,
2154 "movq %rsp,%rbp\n\t"
2155 "sub $0x20,%rsp\n\t"
2156 "movq %rdi,-8(%rbp)\n\t"
2157 "movq %rsi,-16(%rbp)");
2162 amd64_emit_epilogue (void)
2164 EMIT_ASM (amd64_epilogue
,
2165 "movq -16(%rbp),%rdi\n\t"
2166 "movq %rax,(%rdi)\n\t"
2173 amd64_emit_add (void)
2175 EMIT_ASM (amd64_add
,
2176 "add (%rsp),%rax\n\t"
2177 "lea 0x8(%rsp),%rsp");
2181 amd64_emit_sub (void)
2183 EMIT_ASM (amd64_sub
,
2184 "sub %rax,(%rsp)\n\t"
2189 amd64_emit_mul (void)
2195 amd64_emit_lsh (void)
2201 amd64_emit_rsh_signed (void)
2207 amd64_emit_rsh_unsigned (void)
2213 amd64_emit_ext (int arg
)
2218 EMIT_ASM (amd64_ext_8
,
2224 EMIT_ASM (amd64_ext_16
,
2229 EMIT_ASM (amd64_ext_32
,
2238 amd64_emit_log_not (void)
2240 EMIT_ASM (amd64_log_not
,
2241 "test %rax,%rax\n\t"
2247 amd64_emit_bit_and (void)
2249 EMIT_ASM (amd64_and
,
2250 "and (%rsp),%rax\n\t"
2251 "lea 0x8(%rsp),%rsp");
2255 amd64_emit_bit_or (void)
2258 "or (%rsp),%rax\n\t"
2259 "lea 0x8(%rsp),%rsp");
2263 amd64_emit_bit_xor (void)
2265 EMIT_ASM (amd64_xor
,
2266 "xor (%rsp),%rax\n\t"
2267 "lea 0x8(%rsp),%rsp");
2271 amd64_emit_bit_not (void)
2273 EMIT_ASM (amd64_bit_not
,
2274 "xorq $0xffffffffffffffff,%rax");
2278 amd64_emit_equal (void)
2280 EMIT_ASM (amd64_equal
,
2281 "cmp %rax,(%rsp)\n\t"
2282 "je .Lamd64_equal_true\n\t"
2284 "jmp .Lamd64_equal_end\n\t"
2285 ".Lamd64_equal_true:\n\t"
2287 ".Lamd64_equal_end:\n\t"
2288 "lea 0x8(%rsp),%rsp");
2292 amd64_emit_less_signed (void)
2294 EMIT_ASM (amd64_less_signed
,
2295 "cmp %rax,(%rsp)\n\t"
2296 "jl .Lamd64_less_signed_true\n\t"
2298 "jmp .Lamd64_less_signed_end\n\t"
2299 ".Lamd64_less_signed_true:\n\t"
2301 ".Lamd64_less_signed_end:\n\t"
2302 "lea 0x8(%rsp),%rsp");
2306 amd64_emit_less_unsigned (void)
2308 EMIT_ASM (amd64_less_unsigned
,
2309 "cmp %rax,(%rsp)\n\t"
2310 "jb .Lamd64_less_unsigned_true\n\t"
2312 "jmp .Lamd64_less_unsigned_end\n\t"
2313 ".Lamd64_less_unsigned_true:\n\t"
2315 ".Lamd64_less_unsigned_end:\n\t"
2316 "lea 0x8(%rsp),%rsp");
2320 amd64_emit_ref (int size
)
2325 EMIT_ASM (amd64_ref1
,
2329 EMIT_ASM (amd64_ref2
,
2333 EMIT_ASM (amd64_ref4
,
2334 "movl (%rax),%eax");
2337 EMIT_ASM (amd64_ref8
,
2338 "movq (%rax),%rax");
2344 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2346 EMIT_ASM (amd64_if_goto
,
2350 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2358 amd64_emit_goto (int *offset_p
, int *size_p
)
2360 EMIT_ASM (amd64_goto
,
2361 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2369 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2371 int diff
= (to
- (from
+ size
));
2372 unsigned char buf
[sizeof (int)];
2380 memcpy (buf
, &diff
, sizeof (int));
2381 write_inferior_memory (from
, buf
, sizeof (int));
2385 amd64_emit_const (LONGEST num
)
2387 unsigned char buf
[16];
2389 CORE_ADDR buildaddr
= current_insn_ptr
;
2392 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2393 memcpy (&buf
[i
], &num
, sizeof (num
));
2395 append_insns (&buildaddr
, i
, buf
);
2396 current_insn_ptr
= buildaddr
;
2400 amd64_emit_call (CORE_ADDR fn
)
2402 unsigned char buf
[16];
2404 CORE_ADDR buildaddr
;
2407 /* The destination function being in the shared library, may be
2408 >31-bits away off the compiled code pad. */
2410 buildaddr
= current_insn_ptr
;
2412 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2416 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2418 /* Offset is too large for a call. Use callq, but that requires
2419 a register, so avoid it if possible. Use r10, since it is
2420 call-clobbered, we don't have to push/pop it. */
2421 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2423 memcpy (buf
+ i
, &fn
, 8);
2425 buf
[i
++] = 0xff; /* callq *%r10 */
2430 int offset32
= offset64
; /* we know we can't overflow here. */
2431 memcpy (buf
+ i
, &offset32
, 4);
2435 append_insns (&buildaddr
, i
, buf
);
2436 current_insn_ptr
= buildaddr
;
2440 amd64_emit_reg (int reg
)
2442 unsigned char buf
[16];
2444 CORE_ADDR buildaddr
;
2446 /* Assume raw_regs is still in %rdi. */
2447 buildaddr
= current_insn_ptr
;
2449 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2450 memcpy (&buf
[i
], ®
, sizeof (reg
));
2452 append_insns (&buildaddr
, i
, buf
);
2453 current_insn_ptr
= buildaddr
;
2454 amd64_emit_call (get_raw_reg_func_addr ());
2458 amd64_emit_pop (void)
2460 EMIT_ASM (amd64_pop
,
2465 amd64_emit_stack_flush (void)
2467 EMIT_ASM (amd64_stack_flush
,
2472 amd64_emit_zero_ext (int arg
)
2477 EMIT_ASM (amd64_zero_ext_8
,
2481 EMIT_ASM (amd64_zero_ext_16
,
2482 "and $0xffff,%rax");
2485 EMIT_ASM (amd64_zero_ext_32
,
2486 "mov $0xffffffff,%rcx\n\t"
2495 amd64_emit_swap (void)
2497 EMIT_ASM (amd64_swap
,
2504 amd64_emit_stack_adjust (int n
)
2506 unsigned char buf
[16];
2508 CORE_ADDR buildaddr
= current_insn_ptr
;
2511 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2515 /* This only handles adjustments up to 16, but we don't expect any more. */
2517 append_insns (&buildaddr
, i
, buf
);
2518 current_insn_ptr
= buildaddr
;
2521 /* FN's prototype is `LONGEST(*fn)(int)'. */
2524 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2526 unsigned char buf
[16];
2528 CORE_ADDR buildaddr
;
2530 buildaddr
= current_insn_ptr
;
2532 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2533 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2535 append_insns (&buildaddr
, i
, buf
);
2536 current_insn_ptr
= buildaddr
;
2537 amd64_emit_call (fn
);
2540 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2543 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2545 unsigned char buf
[16];
2547 CORE_ADDR buildaddr
;
2549 buildaddr
= current_insn_ptr
;
2551 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2552 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2554 append_insns (&buildaddr
, i
, buf
);
2555 current_insn_ptr
= buildaddr
;
2556 EMIT_ASM (amd64_void_call_2_a
,
2557 /* Save away a copy of the stack top. */
2559 /* Also pass top as the second argument. */
2561 amd64_emit_call (fn
);
2562 EMIT_ASM (amd64_void_call_2_b
,
2563 /* Restore the stack top, %rax may have been trashed. */
2568 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2571 "cmp %rax,(%rsp)\n\t"
2572 "jne .Lamd64_eq_fallthru\n\t"
2573 "lea 0x8(%rsp),%rsp\n\t"
2575 /* jmp, but don't trust the assembler to choose the right jump */
2576 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2577 ".Lamd64_eq_fallthru:\n\t"
2578 "lea 0x8(%rsp),%rsp\n\t"
2588 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2591 "cmp %rax,(%rsp)\n\t"
2592 "je .Lamd64_ne_fallthru\n\t"
2593 "lea 0x8(%rsp),%rsp\n\t"
2595 /* jmp, but don't trust the assembler to choose the right jump */
2596 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2597 ".Lamd64_ne_fallthru:\n\t"
2598 "lea 0x8(%rsp),%rsp\n\t"
2608 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2611 "cmp %rax,(%rsp)\n\t"
2612 "jnl .Lamd64_lt_fallthru\n\t"
2613 "lea 0x8(%rsp),%rsp\n\t"
2615 /* jmp, but don't trust the assembler to choose the right jump */
2616 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2617 ".Lamd64_lt_fallthru:\n\t"
2618 "lea 0x8(%rsp),%rsp\n\t"
2628 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2631 "cmp %rax,(%rsp)\n\t"
2632 "jnle .Lamd64_le_fallthru\n\t"
2633 "lea 0x8(%rsp),%rsp\n\t"
2635 /* jmp, but don't trust the assembler to choose the right jump */
2636 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2637 ".Lamd64_le_fallthru:\n\t"
2638 "lea 0x8(%rsp),%rsp\n\t"
2648 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2651 "cmp %rax,(%rsp)\n\t"
2652 "jng .Lamd64_gt_fallthru\n\t"
2653 "lea 0x8(%rsp),%rsp\n\t"
2655 /* jmp, but don't trust the assembler to choose the right jump */
2656 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2657 ".Lamd64_gt_fallthru:\n\t"
2658 "lea 0x8(%rsp),%rsp\n\t"
2668 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2671 "cmp %rax,(%rsp)\n\t"
2672 "jnge .Lamd64_ge_fallthru\n\t"
2673 ".Lamd64_ge_jump:\n\t"
2674 "lea 0x8(%rsp),%rsp\n\t"
2676 /* jmp, but don't trust the assembler to choose the right jump */
2677 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2678 ".Lamd64_ge_fallthru:\n\t"
2679 "lea 0x8(%rsp),%rsp\n\t"
2688 struct emit_ops amd64_emit_ops
=
2690 amd64_emit_prologue
,
2691 amd64_emit_epilogue
,
2696 amd64_emit_rsh_signed
,
2697 amd64_emit_rsh_unsigned
,
2705 amd64_emit_less_signed
,
2706 amd64_emit_less_unsigned
,
2710 amd64_write_goto_address
,
2715 amd64_emit_stack_flush
,
2716 amd64_emit_zero_ext
,
2718 amd64_emit_stack_adjust
,
2719 amd64_emit_int_call_1
,
2720 amd64_emit_void_call_2
,
2729 #endif /* __x86_64__ */
2732 i386_emit_prologue (void)
2734 EMIT_ASM32 (i386_prologue
,
2738 /* At this point, the raw regs base address is at 8(%ebp), and the
2739 value pointer is at 12(%ebp). */
2743 i386_emit_epilogue (void)
2745 EMIT_ASM32 (i386_epilogue
,
2746 "mov 12(%ebp),%ecx\n\t"
2747 "mov %eax,(%ecx)\n\t"
2748 "mov %ebx,0x4(%ecx)\n\t"
2756 i386_emit_add (void)
2758 EMIT_ASM32 (i386_add
,
2759 "add (%esp),%eax\n\t"
2760 "adc 0x4(%esp),%ebx\n\t"
2761 "lea 0x8(%esp),%esp");
2765 i386_emit_sub (void)
2767 EMIT_ASM32 (i386_sub
,
2768 "subl %eax,(%esp)\n\t"
2769 "sbbl %ebx,4(%esp)\n\t"
2775 i386_emit_mul (void)
2781 i386_emit_lsh (void)
2787 i386_emit_rsh_signed (void)
2793 i386_emit_rsh_unsigned (void)
2799 i386_emit_ext (int arg
)
2804 EMIT_ASM32 (i386_ext_8
,
2807 "movl %eax,%ebx\n\t"
2811 EMIT_ASM32 (i386_ext_16
,
2813 "movl %eax,%ebx\n\t"
2817 EMIT_ASM32 (i386_ext_32
,
2818 "movl %eax,%ebx\n\t"
2827 i386_emit_log_not (void)
2829 EMIT_ASM32 (i386_log_not
,
2831 "test %eax,%eax\n\t"
2838 i386_emit_bit_and (void)
2840 EMIT_ASM32 (i386_and
,
2841 "and (%esp),%eax\n\t"
2842 "and 0x4(%esp),%ebx\n\t"
2843 "lea 0x8(%esp),%esp");
2847 i386_emit_bit_or (void)
2849 EMIT_ASM32 (i386_or
,
2850 "or (%esp),%eax\n\t"
2851 "or 0x4(%esp),%ebx\n\t"
2852 "lea 0x8(%esp),%esp");
2856 i386_emit_bit_xor (void)
2858 EMIT_ASM32 (i386_xor
,
2859 "xor (%esp),%eax\n\t"
2860 "xor 0x4(%esp),%ebx\n\t"
2861 "lea 0x8(%esp),%esp");
2865 i386_emit_bit_not (void)
2867 EMIT_ASM32 (i386_bit_not
,
2868 "xor $0xffffffff,%eax\n\t"
2869 "xor $0xffffffff,%ebx\n\t");
2873 i386_emit_equal (void)
2875 EMIT_ASM32 (i386_equal
,
2876 "cmpl %ebx,4(%esp)\n\t"
2877 "jne .Li386_equal_false\n\t"
2878 "cmpl %eax,(%esp)\n\t"
2879 "je .Li386_equal_true\n\t"
2880 ".Li386_equal_false:\n\t"
2882 "jmp .Li386_equal_end\n\t"
2883 ".Li386_equal_true:\n\t"
2885 ".Li386_equal_end:\n\t"
2887 "lea 0x8(%esp),%esp");
2891 i386_emit_less_signed (void)
2893 EMIT_ASM32 (i386_less_signed
,
2894 "cmpl %ebx,4(%esp)\n\t"
2895 "jl .Li386_less_signed_true\n\t"
2896 "jne .Li386_less_signed_false\n\t"
2897 "cmpl %eax,(%esp)\n\t"
2898 "jl .Li386_less_signed_true\n\t"
2899 ".Li386_less_signed_false:\n\t"
2901 "jmp .Li386_less_signed_end\n\t"
2902 ".Li386_less_signed_true:\n\t"
2904 ".Li386_less_signed_end:\n\t"
2906 "lea 0x8(%esp),%esp");
2910 i386_emit_less_unsigned (void)
2912 EMIT_ASM32 (i386_less_unsigned
,
2913 "cmpl %ebx,4(%esp)\n\t"
2914 "jb .Li386_less_unsigned_true\n\t"
2915 "jne .Li386_less_unsigned_false\n\t"
2916 "cmpl %eax,(%esp)\n\t"
2917 "jb .Li386_less_unsigned_true\n\t"
2918 ".Li386_less_unsigned_false:\n\t"
2920 "jmp .Li386_less_unsigned_end\n\t"
2921 ".Li386_less_unsigned_true:\n\t"
2923 ".Li386_less_unsigned_end:\n\t"
2925 "lea 0x8(%esp),%esp");
2929 i386_emit_ref (int size
)
2934 EMIT_ASM32 (i386_ref1
,
2938 EMIT_ASM32 (i386_ref2
,
2942 EMIT_ASM32 (i386_ref4
,
2943 "movl (%eax),%eax");
2946 EMIT_ASM32 (i386_ref8
,
2947 "movl 4(%eax),%ebx\n\t"
2948 "movl (%eax),%eax");
2954 i386_emit_if_goto (int *offset_p
, int *size_p
)
2956 EMIT_ASM32 (i386_if_goto
,
2962 /* Don't trust the assembler to choose the right jump */
2963 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2966 *offset_p
= 11; /* be sure that this matches the sequence above */
2972 i386_emit_goto (int *offset_p
, int *size_p
)
2974 EMIT_ASM32 (i386_goto
,
2975 /* Don't trust the assembler to choose the right jump */
2976 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2984 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2986 int diff
= (to
- (from
+ size
));
2987 unsigned char buf
[sizeof (int)];
2989 /* We're only doing 4-byte sizes at the moment. */
2996 memcpy (buf
, &diff
, sizeof (int));
2997 write_inferior_memory (from
, buf
, sizeof (int));
3001 i386_emit_const (LONGEST num
)
3003 unsigned char buf
[16];
3005 CORE_ADDR buildaddr
= current_insn_ptr
;
3008 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3009 lo
= num
& 0xffffffff;
3010 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3012 hi
= ((num
>> 32) & 0xffffffff);
3015 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3016 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3021 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3023 append_insns (&buildaddr
, i
, buf
);
3024 current_insn_ptr
= buildaddr
;
3028 i386_emit_call (CORE_ADDR fn
)
3030 unsigned char buf
[16];
3032 CORE_ADDR buildaddr
;
3034 buildaddr
= current_insn_ptr
;
3036 buf
[i
++] = 0xe8; /* call <reladdr> */
3037 offset
= ((int) fn
) - (buildaddr
+ 5);
3038 memcpy (buf
+ 1, &offset
, 4);
3039 append_insns (&buildaddr
, 5, buf
);
3040 current_insn_ptr
= buildaddr
;
3044 i386_emit_reg (int reg
)
3046 unsigned char buf
[16];
3048 CORE_ADDR buildaddr
;
3050 EMIT_ASM32 (i386_reg_a
,
3052 buildaddr
= current_insn_ptr
;
3054 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3055 memcpy (&buf
[i
], ®
, sizeof (reg
));
3057 append_insns (&buildaddr
, i
, buf
);
3058 current_insn_ptr
= buildaddr
;
3059 EMIT_ASM32 (i386_reg_b
,
3060 "mov %eax,4(%esp)\n\t"
3061 "mov 8(%ebp),%eax\n\t"
3063 i386_emit_call (get_raw_reg_func_addr ());
3064 EMIT_ASM32 (i386_reg_c
,
3066 "lea 0x8(%esp),%esp");
3070 i386_emit_pop (void)
3072 EMIT_ASM32 (i386_pop
,
3078 i386_emit_stack_flush (void)
3080 EMIT_ASM32 (i386_stack_flush
,
3086 i386_emit_zero_ext (int arg
)
3091 EMIT_ASM32 (i386_zero_ext_8
,
3092 "and $0xff,%eax\n\t"
3096 EMIT_ASM32 (i386_zero_ext_16
,
3097 "and $0xffff,%eax\n\t"
3101 EMIT_ASM32 (i386_zero_ext_32
,
3110 i386_emit_swap (void)
3112 EMIT_ASM32 (i386_swap
,
3122 i386_emit_stack_adjust (int n
)
3124 unsigned char buf
[16];
3126 CORE_ADDR buildaddr
= current_insn_ptr
;
3129 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3133 append_insns (&buildaddr
, i
, buf
);
3134 current_insn_ptr
= buildaddr
;
3137 /* FN's prototype is `LONGEST(*fn)(int)'. */
3140 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3142 unsigned char buf
[16];
3144 CORE_ADDR buildaddr
;
3146 EMIT_ASM32 (i386_int_call_1_a
,
3147 /* Reserve a bit of stack space. */
3149 /* Put the one argument on the stack. */
3150 buildaddr
= current_insn_ptr
;
3152 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3155 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3157 append_insns (&buildaddr
, i
, buf
);
3158 current_insn_ptr
= buildaddr
;
3159 i386_emit_call (fn
);
3160 EMIT_ASM32 (i386_int_call_1_c
,
3162 "lea 0x8(%esp),%esp");
3165 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3168 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3170 unsigned char buf
[16];
3172 CORE_ADDR buildaddr
;
3174 EMIT_ASM32 (i386_void_call_2_a
,
3175 /* Preserve %eax only; we don't have to worry about %ebx. */
3177 /* Reserve a bit of stack space for arguments. */
3178 "sub $0x10,%esp\n\t"
3179 /* Copy "top" to the second argument position. (Note that
3180 we can't assume function won't scribble on its
3181 arguments, so don't try to restore from this.) */
3182 "mov %eax,4(%esp)\n\t"
3183 "mov %ebx,8(%esp)");
3184 /* Put the first argument on the stack. */
3185 buildaddr
= current_insn_ptr
;
3187 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3190 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3192 append_insns (&buildaddr
, i
, buf
);
3193 current_insn_ptr
= buildaddr
;
3194 i386_emit_call (fn
);
3195 EMIT_ASM32 (i386_void_call_2_b
,
3196 "lea 0x10(%esp),%esp\n\t"
3197 /* Restore original stack top. */
3203 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3206 /* Check low half first, more likely to be decider */
3207 "cmpl %eax,(%esp)\n\t"
3208 "jne .Leq_fallthru\n\t"
3209 "cmpl %ebx,4(%esp)\n\t"
3210 "jne .Leq_fallthru\n\t"
3211 "lea 0x8(%esp),%esp\n\t"
3214 /* jmp, but don't trust the assembler to choose the right jump */
3215 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3216 ".Leq_fallthru:\n\t"
3217 "lea 0x8(%esp),%esp\n\t"
3228 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3231 /* Check low half first, more likely to be decider */
3232 "cmpl %eax,(%esp)\n\t"
3234 "cmpl %ebx,4(%esp)\n\t"
3235 "je .Lne_fallthru\n\t"
3237 "lea 0x8(%esp),%esp\n\t"
3240 /* jmp, but don't trust the assembler to choose the right jump */
3241 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3242 ".Lne_fallthru:\n\t"
3243 "lea 0x8(%esp),%esp\n\t"
3254 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3257 "cmpl %ebx,4(%esp)\n\t"
3259 "jne .Llt_fallthru\n\t"
3260 "cmpl %eax,(%esp)\n\t"
3261 "jnl .Llt_fallthru\n\t"
3263 "lea 0x8(%esp),%esp\n\t"
3266 /* jmp, but don't trust the assembler to choose the right jump */
3267 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3268 ".Llt_fallthru:\n\t"
3269 "lea 0x8(%esp),%esp\n\t"
3280 i386_emit_le_goto (int *offset_p
, int *size_p
)
3283 "cmpl %ebx,4(%esp)\n\t"
3285 "jne .Lle_fallthru\n\t"
3286 "cmpl %eax,(%esp)\n\t"
3287 "jnle .Lle_fallthru\n\t"
3289 "lea 0x8(%esp),%esp\n\t"
3292 /* jmp, but don't trust the assembler to choose the right jump */
3293 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3294 ".Lle_fallthru:\n\t"
3295 "lea 0x8(%esp),%esp\n\t"
3306 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3309 "cmpl %ebx,4(%esp)\n\t"
3311 "jne .Lgt_fallthru\n\t"
3312 "cmpl %eax,(%esp)\n\t"
3313 "jng .Lgt_fallthru\n\t"
3315 "lea 0x8(%esp),%esp\n\t"
3318 /* jmp, but don't trust the assembler to choose the right jump */
3319 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3320 ".Lgt_fallthru:\n\t"
3321 "lea 0x8(%esp),%esp\n\t"
3332 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3335 "cmpl %ebx,4(%esp)\n\t"
3337 "jne .Lge_fallthru\n\t"
3338 "cmpl %eax,(%esp)\n\t"
3339 "jnge .Lge_fallthru\n\t"
3341 "lea 0x8(%esp),%esp\n\t"
3344 /* jmp, but don't trust the assembler to choose the right jump */
3345 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3346 ".Lge_fallthru:\n\t"
3347 "lea 0x8(%esp),%esp\n\t"
3357 struct emit_ops i386_emit_ops
=
3365 i386_emit_rsh_signed
,
3366 i386_emit_rsh_unsigned
,
3374 i386_emit_less_signed
,
3375 i386_emit_less_unsigned
,
3379 i386_write_goto_address
,
3384 i386_emit_stack_flush
,
3387 i386_emit_stack_adjust
,
3388 i386_emit_int_call_1
,
3389 i386_emit_void_call_2
,
3399 static struct emit_ops
*
3403 if (is_64bit_tdesc ())
3404 return &amd64_emit_ops
;
3407 return &i386_emit_ops
;
/* Implementation of linux_target_ops method "supports_range_stepping":
   range stepping is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3416 /* This is initialized assuming an amd64 target.
3417 x86_arch_setup will correct it for i386 or amd64 targets. */
3419 struct linux_target_ops the_low_target
=
3422 x86_linux_regs_info
,
3423 x86_cannot_fetch_register
,
3424 x86_cannot_store_register
,
3425 NULL
, /* fetch_register */
3433 x86_supports_z_point_type
,
3436 x86_stopped_by_watchpoint
,
3437 x86_stopped_data_address
,
3438 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3439 native i386 case (no registers smaller than an xfer unit), and are not
3440 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3443 /* need to fix up i386 siginfo if host is amd64 */
3445 x86_linux_new_process
,
3446 x86_linux_new_thread
,
3447 x86_linux_prepare_to_resume
,
3448 x86_linux_process_qsupported
,
3449 x86_supports_tracepoints
,
3450 x86_get_thread_area
,
3451 x86_install_fast_tracepoint_jump_pad
,
3453 x86_get_min_fast_tracepoint_insn_len
,
3454 x86_supports_range_stepping
,
3458 initialize_low_arch (void)
3460 /* Initialize the Linux target descriptions. */
3462 init_registers_amd64_linux ();
3463 init_registers_amd64_avx_linux ();
3464 init_registers_amd64_avx512_linux ();
3465 init_registers_amd64_mpx_linux ();
3467 init_registers_x32_linux ();
3468 init_registers_x32_avx_linux ();
3469 init_registers_x32_avx512_linux ();
3471 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3472 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3473 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3475 init_registers_i386_linux ();
3476 init_registers_i386_mmx_linux ();
3477 init_registers_i386_avx_linux ();
3478 init_registers_i386_avx512_linux ();
3479 init_registers_i386_mpx_linux ();
3481 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3482 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3483 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3485 initialize_regsets_info (&x86_regsets_info
);