1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
/* Declarations of the target descriptions generated at build time.
   Each pair is defined in the named auto-generated file.  */

/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

/* Target descriptions used for GDBs that do not support XML; built
   lazily from the strings below.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;
/* Templates for the jump instructions emitted into fast-tracepoint
   jump pads; the 4 (resp. 2) displacement bytes are patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  The leading
   '@' tells the tdesc machinery this is a literal XML document, not a
   file name.  NOTE: the closing tag was missing in the mangled text;
   restored so the string literals terminate.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests and arch_prctl codes that
   older kernel/libc headers may not provide.  Each #ifndef guard was
   missing its #endif in the mangled text; restored.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct x86_debug_reg_state debug_reg_state
;
/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
182 static const int x86_64_regmap
[] =
184 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
185 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
186 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
187 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
188 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
189 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
197 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
198 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
201 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1, -1, -1, -1, -1, -1, -1, -1
209 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
210 #define X86_64_USER_REGS (GS + 1)
212 #else /* ! __x86_64__ */
214 /* Mapping between the general-purpose registers in `struct user'
215 format and GDB's register array layout. */
216 static /*const*/ int i386_regmap
[] =
218 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
219 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
220 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
221 DS
* 4, ES
* 4, FS
* 4, GS
* 4
224 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
232 /* Returns true if the current inferior belongs to a x86-64 process,
236 is_64bit_tdesc (void)
238 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
240 return register_size (regcache
->tdesc
, 0) == 8;
246 /* Called by libthread_db. */
249 ps_get_thread_area (const struct ps_prochandle
*ph
,
250 lwpid_t lwpid
, int idx
, void **base
)
253 int use_64bit
= is_64bit_tdesc ();
260 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
264 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
275 unsigned int desc
[4];
277 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
278 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
281 /* Ensure we properly extend the value to 64-bits for x86_64. */
282 *base
= (void *) (uintptr_t) desc
[1];
287 /* Get the thread area address. This is used to recognize which
288 thread is which when tracing with the in-process agent library. We
289 don't read anything from the address, and treat it as opaque; it's
290 the address itself that we assume is unique per-thread. */
293 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
296 int use_64bit
= is_64bit_tdesc ();
301 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
303 *addr
= (CORE_ADDR
) (uintptr_t) base
;
312 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
313 struct thread_info
*thr
= get_lwp_thread (lwp
);
314 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
315 unsigned int desc
[4];
317 const int reg_thread_area
= 3; /* bits to scale down register value. */
320 collect_register_by_name (regcache
, "gs", &gs
);
322 idx
= gs
>> reg_thread_area
;
324 if (ptrace (PTRACE_GET_THREAD_AREA
,
326 (void *) (long) idx
, (unsigned long) &desc
) < 0)
337 x86_cannot_store_register (int regno
)
340 if (is_64bit_tdesc ())
344 return regno
>= I386_NUM_REGS
;
348 x86_cannot_fetch_register (int regno
)
351 if (is_64bit_tdesc ())
355 return regno
>= I386_NUM_REGS
;
359 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
364 if (register_size (regcache
->tdesc
, 0) == 8)
366 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
367 if (x86_64_regmap
[i
] != -1)
368 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
372 /* 32-bit inferior registers need to be zero-extended.
373 Callers would read uninitialized memory otherwise. */
374 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
377 for (i
= 0; i
< I386_NUM_REGS
; i
++)
378 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
380 collect_register_by_name (regcache
, "orig_eax",
381 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
385 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
390 if (register_size (regcache
->tdesc
, 0) == 8)
392 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
393 if (x86_64_regmap
[i
] != -1)
394 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
399 for (i
= 0; i
< I386_NUM_REGS
; i
++)
400 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
402 supply_register_by_name (regcache
, "orig_eax",
403 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Fill the FP register buffer BUF from REGCACHE: fxsave layout on
   amd64, classic fsave layout on plain i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Store the FP register buffer BUF into REGCACHE; inverse of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
429 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
431 i387_cache_to_fxsave (regcache
, buf
);
435 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
437 i387_fxsave_to_cache (regcache
, buf
);
/* Fill the XSAVE extended-state buffer BUF from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Store the XSAVE extended-state buffer BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
454 /* ??? The non-biarch i386 case stores all the i387 regs twice.
455 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
456 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
457 doesn't work. IWBN to avoid the duplication in the case where it
458 does work. Maybe the arch_setup routine could check whether it works
459 and update the supported regsets accordingly. */
461 static struct regset_info x86_regsets
[] =
463 #ifdef HAVE_PTRACE_GETREGS
464 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
466 x86_fill_gregset
, x86_store_gregset
},
467 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
468 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
470 # ifdef HAVE_PTRACE_GETFPXREGS
471 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
473 x86_fill_fpxregset
, x86_store_fpxregset
},
476 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
478 x86_fill_fpregset
, x86_store_fpregset
},
479 #endif /* HAVE_PTRACE_GETREGS */
480 { 0, 0, 0, -1, -1, NULL
, NULL
}
484 x86_get_pc (struct regcache
*regcache
)
486 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
491 collect_register_by_name (regcache
, "rip", &pc
);
492 return (CORE_ADDR
) pc
;
497 collect_register_by_name (regcache
, "eip", &pc
);
498 return (CORE_ADDR
) pc
;
503 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
505 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
509 unsigned long newpc
= pc
;
510 supply_register_by_name (regcache
, "rip", &newpc
);
514 unsigned int newpc
= pc
;
515 supply_register_by_name (regcache
, "eip", &newpc
);
/* The int3 software breakpoint instruction.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
523 x86_breakpoint_at (CORE_ADDR pc
)
527 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
546 /* Support for debug registers. */
549 x86_linux_dr_get (ptid_t ptid
, int regnum
)
554 tid
= ptid_get_lwp (ptid
);
557 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
559 error ("Couldn't read debug register");
565 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
569 tid
= ptid_get_lwp (ptid
);
572 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
574 error ("Couldn't write debug register");
578 update_debug_registers_callback (struct inferior_list_entry
*entry
,
581 struct thread_info
*thr
= (struct thread_info
*) entry
;
582 struct lwp_info
*lwp
= get_thread_lwp (thr
);
583 int pid
= *(int *) pid_p
;
585 /* Only update the threads of this process. */
586 if (pid_of (thr
) == pid
)
588 /* The actual update is done later just before resuming the lwp,
589 we just mark that the registers need updating. */
590 lwp
->arch_private
->debug_registers_changed
= 1;
592 /* If the lwp isn't stopped, force it to momentarily pause, so
593 we can update its debug registers. */
595 linux_stop_lwp (lwp
);
601 /* Update the inferior's debug register REGNUM from STATE. */
604 x86_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
606 /* Only update the threads of this process. */
607 int pid
= pid_of (current_thread
);
609 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
611 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
614 /* Return the inferior's debug register REGNUM. */
617 x86_dr_low_get_addr (int regnum
)
619 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
621 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
624 /* Update the inferior's DR7 debug control register from STATE. */
627 x86_dr_low_set_control (unsigned long control
)
629 /* Only update the threads of this process. */
630 int pid
= pid_of (current_thread
);
632 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
635 /* Return the inferior's DR7 debug control register. */
638 x86_dr_low_get_control (void)
640 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
643 /* Get the value of the DR6 debug status register from the inferior
644 and record it in STATE. */
647 x86_dr_low_get_status (void)
649 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
652 /* Low-level function vector. */
653 struct x86_dr_low_type x86_dr_low
=
655 x86_dr_low_set_control
,
658 x86_dr_low_get_status
,
659 x86_dr_low_get_control
,
663 /* Breakpoint/Watchpoint support. */
666 x86_supports_z_point_type (char z_type
)
672 case Z_PACKET_WRITE_WP
:
673 case Z_PACKET_ACCESS_WP
:
681 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
682 int size
, struct raw_breakpoint
*bp
)
684 struct process_info
*proc
= current_process ();
688 case raw_bkpt_type_sw
:
689 return insert_memory_breakpoint (bp
);
691 case raw_bkpt_type_hw
:
692 case raw_bkpt_type_write_wp
:
693 case raw_bkpt_type_access_wp
:
695 enum target_hw_bp_type hw_type
696 = raw_bkpt_type_to_target_hw_bp_type (type
);
697 struct x86_debug_reg_state
*state
698 = &proc
->priv
->arch_private
->debug_reg_state
;
700 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
710 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
711 int size
, struct raw_breakpoint
*bp
)
713 struct process_info
*proc
= current_process ();
717 case raw_bkpt_type_sw
:
718 return remove_memory_breakpoint (bp
);
720 case raw_bkpt_type_hw
:
721 case raw_bkpt_type_write_wp
:
722 case raw_bkpt_type_access_wp
:
724 enum target_hw_bp_type hw_type
725 = raw_bkpt_type_to_target_hw_bp_type (type
);
726 struct x86_debug_reg_state
*state
727 = &proc
->priv
->arch_private
->debug_reg_state
;
729 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
738 x86_stopped_by_watchpoint (void)
740 struct process_info
*proc
= current_process ();
741 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
745 x86_stopped_data_address (void)
747 struct process_info
*proc
= current_process ();
749 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
755 /* Called when a new process is created. */
757 static struct arch_process_info
*
758 x86_linux_new_process (void)
760 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
762 x86_low_init_dregs (&info
->debug_reg_state
);
767 /* Called when a new thread is detected. */
769 static struct arch_lwp_info
*
770 x86_linux_new_thread (void)
772 struct arch_lwp_info
*info
= XCNEW (struct arch_lwp_info
);
774 info
->debug_registers_changed
= 1;
779 /* Called when resuming a thread.
780 If the debug regs have changed, update the thread's copies. */
783 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
785 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
786 int clear_status
= 0;
788 if (lwp
->arch_private
->debug_registers_changed
)
791 int pid
= ptid_get_pid (ptid
);
792 struct process_info
*proc
= find_process_pid (pid
);
793 struct x86_debug_reg_state
*state
794 = &proc
->priv
->arch_private
->debug_reg_state
;
796 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
798 ALL_DEBUG_ADDRESS_REGISTERS (i
)
799 if (state
->dr_ref_count
[i
] > 0)
801 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
803 /* If we're setting a watchpoint, any change the inferior
804 had done itself to the debug registers needs to be
805 discarded, otherwise, x86_dr_stopped_data_address can
810 if (state
->dr_control_mirror
!= 0)
811 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
813 lwp
->arch_private
->debug_registers_changed
= 0;
816 if (clear_status
|| lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
817 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
820 /* When GDBSERVER is built as a 64-bit application on linux, the
821 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
822 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
823 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
824 conversion in-place ourselves. */
826 /* These types below (compat_*) define a siginfo type that is layout
827 compatible with the siginfo type exported by the 32-bit userspace
832 typedef int compat_int_t
;
833 typedef unsigned int compat_uptr_t
;
835 typedef int compat_time_t
;
836 typedef int compat_timer_t
;
837 typedef int compat_clock_t
;
839 struct compat_timeval
841 compat_time_t tv_sec
;
845 typedef union compat_sigval
847 compat_int_t sival_int
;
848 compat_uptr_t sival_ptr
;
851 typedef struct compat_siginfo
859 int _pad
[((128 / sizeof (int)) - 3)];
868 /* POSIX.1b timers */
873 compat_sigval_t _sigval
;
876 /* POSIX.1b signals */
881 compat_sigval_t _sigval
;
890 compat_clock_t _utime
;
891 compat_clock_t _stime
;
894 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
909 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
910 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
912 typedef struct compat_x32_siginfo
920 int _pad
[((128 / sizeof (int)) - 3)];
929 /* POSIX.1b timers */
934 compat_sigval_t _sigval
;
937 /* POSIX.1b signals */
942 compat_sigval_t _sigval
;
951 compat_x32_clock_t _utime
;
952 compat_x32_clock_t _stime
;
955 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
968 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
970 #define cpt_si_pid _sifields._kill._pid
971 #define cpt_si_uid _sifields._kill._uid
972 #define cpt_si_timerid _sifields._timer._tid
973 #define cpt_si_overrun _sifields._timer._overrun
974 #define cpt_si_status _sifields._sigchld._status
975 #define cpt_si_utime _sifields._sigchld._utime
976 #define cpt_si_stime _sifields._sigchld._stime
977 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
978 #define cpt_si_addr _sifields._sigfault._addr
979 #define cpt_si_band _sifields._sigpoll._band
980 #define cpt_si_fd _sifields._sigpoll._fd
982 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
983 In their place is si_timer1,si_timer2. */
985 #define si_timerid si_timer1
988 #define si_overrun si_timer2
992 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
994 memset (to
, 0, sizeof (*to
));
996 to
->si_signo
= from
->si_signo
;
997 to
->si_errno
= from
->si_errno
;
998 to
->si_code
= from
->si_code
;
1000 if (to
->si_code
== SI_TIMER
)
1002 to
->cpt_si_timerid
= from
->si_timerid
;
1003 to
->cpt_si_overrun
= from
->si_overrun
;
1004 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1006 else if (to
->si_code
== SI_USER
)
1008 to
->cpt_si_pid
= from
->si_pid
;
1009 to
->cpt_si_uid
= from
->si_uid
;
1011 else if (to
->si_code
< 0)
1013 to
->cpt_si_pid
= from
->si_pid
;
1014 to
->cpt_si_uid
= from
->si_uid
;
1015 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1019 switch (to
->si_signo
)
1022 to
->cpt_si_pid
= from
->si_pid
;
1023 to
->cpt_si_uid
= from
->si_uid
;
1024 to
->cpt_si_status
= from
->si_status
;
1025 to
->cpt_si_utime
= from
->si_utime
;
1026 to
->cpt_si_stime
= from
->si_stime
;
1032 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1035 to
->cpt_si_band
= from
->si_band
;
1036 to
->cpt_si_fd
= from
->si_fd
;
1039 to
->cpt_si_pid
= from
->si_pid
;
1040 to
->cpt_si_uid
= from
->si_uid
;
1041 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1048 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1050 memset (to
, 0, sizeof (*to
));
1052 to
->si_signo
= from
->si_signo
;
1053 to
->si_errno
= from
->si_errno
;
1054 to
->si_code
= from
->si_code
;
1056 if (to
->si_code
== SI_TIMER
)
1058 to
->si_timerid
= from
->cpt_si_timerid
;
1059 to
->si_overrun
= from
->cpt_si_overrun
;
1060 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1062 else if (to
->si_code
== SI_USER
)
1064 to
->si_pid
= from
->cpt_si_pid
;
1065 to
->si_uid
= from
->cpt_si_uid
;
1067 else if (to
->si_code
< 0)
1069 to
->si_pid
= from
->cpt_si_pid
;
1070 to
->si_uid
= from
->cpt_si_uid
;
1071 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1075 switch (to
->si_signo
)
1078 to
->si_pid
= from
->cpt_si_pid
;
1079 to
->si_uid
= from
->cpt_si_uid
;
1080 to
->si_status
= from
->cpt_si_status
;
1081 to
->si_utime
= from
->cpt_si_utime
;
1082 to
->si_stime
= from
->cpt_si_stime
;
1088 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1091 to
->si_band
= from
->cpt_si_band
;
1092 to
->si_fd
= from
->cpt_si_fd
;
1095 to
->si_pid
= from
->cpt_si_pid
;
1096 to
->si_uid
= from
->cpt_si_uid
;
1097 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1104 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1107 memset (to
, 0, sizeof (*to
));
1109 to
->si_signo
= from
->si_signo
;
1110 to
->si_errno
= from
->si_errno
;
1111 to
->si_code
= from
->si_code
;
1113 if (to
->si_code
== SI_TIMER
)
1115 to
->cpt_si_timerid
= from
->si_timerid
;
1116 to
->cpt_si_overrun
= from
->si_overrun
;
1117 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1119 else if (to
->si_code
== SI_USER
)
1121 to
->cpt_si_pid
= from
->si_pid
;
1122 to
->cpt_si_uid
= from
->si_uid
;
1124 else if (to
->si_code
< 0)
1126 to
->cpt_si_pid
= from
->si_pid
;
1127 to
->cpt_si_uid
= from
->si_uid
;
1128 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1132 switch (to
->si_signo
)
1135 to
->cpt_si_pid
= from
->si_pid
;
1136 to
->cpt_si_uid
= from
->si_uid
;
1137 to
->cpt_si_status
= from
->si_status
;
1138 to
->cpt_si_utime
= from
->si_utime
;
1139 to
->cpt_si_stime
= from
->si_stime
;
1145 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1148 to
->cpt_si_band
= from
->si_band
;
1149 to
->cpt_si_fd
= from
->si_fd
;
1152 to
->cpt_si_pid
= from
->si_pid
;
1153 to
->cpt_si_uid
= from
->si_uid
;
1154 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1161 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1162 compat_x32_siginfo_t
*from
)
1164 memset (to
, 0, sizeof (*to
));
1166 to
->si_signo
= from
->si_signo
;
1167 to
->si_errno
= from
->si_errno
;
1168 to
->si_code
= from
->si_code
;
1170 if (to
->si_code
== SI_TIMER
)
1172 to
->si_timerid
= from
->cpt_si_timerid
;
1173 to
->si_overrun
= from
->cpt_si_overrun
;
1174 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1176 else if (to
->si_code
== SI_USER
)
1178 to
->si_pid
= from
->cpt_si_pid
;
1179 to
->si_uid
= from
->cpt_si_uid
;
1181 else if (to
->si_code
< 0)
1183 to
->si_pid
= from
->cpt_si_pid
;
1184 to
->si_uid
= from
->cpt_si_uid
;
1185 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1189 switch (to
->si_signo
)
1192 to
->si_pid
= from
->cpt_si_pid
;
1193 to
->si_uid
= from
->cpt_si_uid
;
1194 to
->si_status
= from
->cpt_si_status
;
1195 to
->si_utime
= from
->cpt_si_utime
;
1196 to
->si_stime
= from
->cpt_si_stime
;
1202 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1205 to
->si_band
= from
->cpt_si_band
;
1206 to
->si_fd
= from
->cpt_si_fd
;
1209 to
->si_pid
= from
->cpt_si_pid
;
1210 to
->si_uid
= from
->cpt_si_uid
;
1211 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1217 #endif /* __x86_64__ */
1219 /* Convert a native/host siginfo object, into/from the siginfo in the
1220 layout of the inferiors' architecture. Returns true if any
1221 conversion was done; false otherwise. If DIRECTION is 1, then copy
1222 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1226 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1229 unsigned int machine
;
1230 int tid
= lwpid_of (current_thread
);
1231 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1233 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1234 if (!is_64bit_tdesc ())
1236 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1239 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1241 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1245 /* No fixup for native x32 GDB. */
1246 else if (!is_elf64
&& sizeof (void *) == 8)
1248 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1251 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1254 siginfo_from_compat_x32_siginfo (native
,
1255 (struct compat_x32_siginfo
*) inf
);
1266 /* Format of XSAVE extended state is:
1269 fxsave_bytes[0..463]
1270 sw_usable_bytes[464..511]
1271 xstate_hdr_bytes[512..575]
1276 Same memory layout will be used for the coredump NT_X86_XSTATE
1277 representing the XSAVE extended state registers.
1279 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1280 extended state mask, which is the same as the extended control register
1281 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1282 together with the mask saved in the xstate_hdr_bytes to determine what
1283 states the processor/OS supports and what state, used or initialized,
1284 the process/thread is in. */
/* Byte offset of the OS-enabled feature mask (XCR0 copy) within the
   sw_usable_bytes area of an XSAVE buffer.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet".  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 = not probed.  */
static int have_ptrace_getregset = -1;
1301 /* Get Linux/x86 target description from running target. */
1303 static const struct target_desc
*
1304 x86_linux_read_description (void)
1306 unsigned int machine
;
1310 static uint64_t xcr0
;
1311 struct regset_info
*regset
;
1313 tid
= lwpid_of (current_thread
);
1315 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1317 if (sizeof (void *) == 4)
1320 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1322 else if (machine
== EM_X86_64
)
1323 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1327 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1328 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1330 elf_fpxregset_t fpxregs
;
1332 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1334 have_ptrace_getfpxregs
= 0;
1335 have_ptrace_getregset
= 0;
1336 return tdesc_i386_mmx_linux
;
1339 have_ptrace_getfpxregs
= 1;
1345 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1347 /* Don't use XML. */
1349 if (machine
== EM_X86_64
)
1350 return tdesc_amd64_linux_no_xml
;
1353 return tdesc_i386_linux_no_xml
;
1356 if (have_ptrace_getregset
== -1)
1358 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1361 iov
.iov_base
= xstateregs
;
1362 iov
.iov_len
= sizeof (xstateregs
);
1364 /* Check if PTRACE_GETREGSET works. */
1365 if (ptrace (PTRACE_GETREGSET
, tid
,
1366 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1367 have_ptrace_getregset
= 0;
1370 have_ptrace_getregset
= 1;
1372 /* Get XCR0 from XSAVE extended state. */
1373 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1374 / sizeof (uint64_t))];
1376 /* Use PTRACE_GETREGSET if it is available. */
1377 for (regset
= x86_regsets
;
1378 regset
->fill_function
!= NULL
; regset
++)
1379 if (regset
->get_request
== PTRACE_GETREGSET
)
1380 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1381 else if (regset
->type
!= GENERAL_REGS
)
1386 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1387 xcr0_features
= (have_ptrace_getregset
1388 && (xcr0
& X86_XSTATE_ALL_MASK
));
1393 if (machine
== EM_X86_64
)
1400 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1402 case X86_XSTATE_AVX512_MASK
:
1403 return tdesc_amd64_avx512_linux
;
1405 case X86_XSTATE_MPX_MASK
:
1406 return tdesc_amd64_mpx_linux
;
1408 case X86_XSTATE_AVX_MASK
:
1409 return tdesc_amd64_avx_linux
;
1412 return tdesc_amd64_linux
;
1416 return tdesc_amd64_linux
;
1422 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1424 case X86_XSTATE_AVX512_MASK
:
1425 return tdesc_x32_avx512_linux
;
1427 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1428 case X86_XSTATE_AVX_MASK
:
1429 return tdesc_x32_avx_linux
;
1432 return tdesc_x32_linux
;
1436 return tdesc_x32_linux
;
1444 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1446 case (X86_XSTATE_AVX512_MASK
):
1447 return tdesc_i386_avx512_linux
;
1449 case (X86_XSTATE_MPX_MASK
):
1450 return tdesc_i386_mpx_linux
;
1452 case (X86_XSTATE_AVX_MASK
):
1453 return tdesc_i386_avx_linux
;
1456 return tdesc_i386_linux
;
1460 return tdesc_i386_linux
;
1463 gdb_assert_not_reached ("failed to return tdesc");
1466 /* Callback for find_inferior. Stops iteration when a thread with a
1467 given PID is found. */
1470 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1472 int pid
= *(int *) data
;
1474 return (ptid_get_pid (entry
->id
) == pid
);
1477 /* Callback for for_each_inferior. Calls the arch_setup routine for
1481 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1483 int pid
= ptid_get_pid (entry
->id
);
1485 /* Look up any thread of this processes. */
1487 = (struct thread_info
*) find_inferior (&all_threads
,
1488 same_process_callback
, &pid
);
1490 the_low_target
.arch_setup ();
1493 /* Update all the target description of all processes; a new GDB
1494 connected, and it may or not support xml target descriptions. */
1497 x86_linux_update_xmltarget (void)
1499 struct thread_info
*saved_thread
= current_thread
;
1501 /* Before changing the register cache's internal layout, flush the
1502 contents of the current valid caches back to the threads, and
1503 release the current regcache objects. */
1504 regcache_release ();
1506 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1508 current_thread
= saved_thread
;
1511 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1512 PTRACE_GETREGSET. */
1515 x86_linux_process_qsupported (const char *query
)
1517 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1518 with "i386" in qSupported query, it supports x86 XML target
/* Only act when the query carries an "xmlRegisters=" feature list.  */
1521 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
/* Duplicate past the 13-char "xmlRegisters=" prefix so strtok can
   scribble on it.  NOTE(review): the matching free(copy) line was lost
   in extraction; also strtok is non-reentrant — strtok_r would be the
   safer choice here.  Confirm against upstream before changing.  */
1523 char *copy
= xstrdup (query
+ 13);
/* Walk the comma-separated architecture names.  */
1526 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1528 if (strcmp (p
, "i386") == 0)
/* Re-read target descriptions now that GDB's XML support is known.  */
1538 x86_linux_update_xmltarget ();
1541 /* Common for x86/x86-64. */
/* Regset table shared by both ABIs; num_regsets (0) is computed at
   init time elsewhere.  NOTE(review): some initializer fields were
   dropped by extraction.  */
1543 static struct regsets_info x86_regsets_info
=
1545 x86_regsets
, /* regsets */
1546 0, /* num_regsets */
1547 NULL
, /* disabled_regsets */
/* amd64 uses regsets only — no usrregs fallback.  */
1551 static struct regs_info amd64_linux_regs_info
=
1553 NULL
, /* regset_bitmap */
1554 NULL
, /* usrregs_info */
/* i386 additionally supports PTRACE_PEEKUSER-style register access.  */
1558 static struct usrregs_info i386_linux_usrregs_info
=
1564 static struct regs_info i386_linux_regs_info
=
1566 NULL
, /* regset_bitmap */
1567 &i386_linux_usrregs_info
,
/* Select the register-access description matching the inferior's ABI.  */
1571 const struct regs_info
*
1572 x86_linux_regs_info (void)
1575 if (is_64bit_tdesc ())
1576 return &amd64_linux_regs_info
;
1579 return &i386_linux_regs_info
;
1582 /* Initialize the target description for the architecture of the
/* Reads the xsave mask etc. and stores the resulting tdesc on the
   current process.  */
1586 x86_arch_setup (void)
1588 current_process ()->tdesc
= x86_linux_read_description ();
/* NOTE(review): body of this predicate was lost in extraction;
   presumably it just returns 1 — confirm against upstream.  */
1592 x86_supports_tracepoints (void)
/* Copy LEN bytes from BUF into the inferior at *TO.
   NOTE(review): the line advancing *TO by LEN was dropped by
   extraction — callers below rely on that advance; confirm upstream.  */
1598 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1600 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of space-separated hex byte values, into BUF.
   Returns the number of bytes written.  NOTE(review): the loop and
   the endptr declaration were dropped by extraction.  */
1605 push_opcode (unsigned char *buf
, char *op
)
1607 unsigned char *buf_org
= buf
;
/* Each token is one hex byte; strtoul advances endptr past it.  */
1612 unsigned long ul
= strtoul (op
, &endptr
, 16);
1621 return buf
- buf_org
;
1626 /* Build a jump pad that saves registers and calls a collection
1627 function. Writes a jump instruction to the jump pad to
1628 JJUMPAD_INSN. The caller is responsible to write it in at the
1629 tracepoint address. */
/* NOTE(review): extraction dropped several lines of this function,
   including the LOCKADDR/ORIG_SIZE parameters, `int i = 0;`, the
   jump_insn declaration, error returns and closing braces.  The
   surviving text is kept verbatim below; only comments are added.  */
1632 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1633 CORE_ADDR collector
,
1636 CORE_ADDR
*jump_entry
,
1637 CORE_ADDR
*trampoline
,
1638 ULONGEST
*trampoline_size
,
1639 unsigned char *jjump_pad_insn
,
1640 ULONGEST
*jjump_pad_insn_size
,
1641 CORE_ADDR
*adjusted_insn_addr
,
1642 CORE_ADDR
*adjusted_insn_addr_end
,
1645 unsigned char buf
[40];
1649 CORE_ADDR buildaddr
= *jump_entry
;
1651 /* Build the jump pad. */
1653 /* First, do tracepoint data collection. Save registers. */
1655 /* Need to ensure stack pointer saved first. */
1656 buf
[i
++] = 0x54; /* push %rsp */
1657 buf
[i
++] = 0x55; /* push %rbp */
1658 buf
[i
++] = 0x57; /* push %rdi */
1659 buf
[i
++] = 0x56; /* push %rsi */
1660 buf
[i
++] = 0x52; /* push %rdx */
1661 buf
[i
++] = 0x51; /* push %rcx */
1662 buf
[i
++] = 0x53; /* push %rbx */
1663 buf
[i
++] = 0x50; /* push %rax */
1664 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1665 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1666 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1667 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1668 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1669 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1670 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1671 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1672 buf
[i
++] = 0x9c; /* pushfq */
1673 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
/* Store the tracepoint address literal into the mov immediate.  */
1675 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1676 i
+= sizeof (unsigned long);
1677 buf
[i
++] = 0x57; /* push %rdi */
1678 append_insns (&buildaddr
, i
, buf
);
1680 /* Stack space for the collecting_t object. */
1682 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1683 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1684 memcpy (buf
+ i
, &tpoint
, 8);
1686 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1687 i
+= push_opcode (&buf
[i
],
1688 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1689 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1690 append_insns (&buildaddr
, i
, buf
);
/* Spin until we own the collecting lock (compare-and-swap loop).  */
1694 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1695 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1697 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1698 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1699 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1700 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1701 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1702 append_insns (&buildaddr
, i
, buf
);
1704 /* Set up the gdb_collect call. */
1705 /* At this point, (stack pointer + 0x18) is the base of our saved
1709 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1710 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1712 /* tpoint address may be 64-bit wide. */
1713 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1714 memcpy (buf
+ i
, &tpoint
, 8);
1716 append_insns (&buildaddr
, i
, buf
);
1718 /* The collector function being in the shared library, may be
1719 >31-bits away off the jump pad. */
1721 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1722 memcpy (buf
+ i
, &collector
, 8);
1724 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1725 append_insns (&buildaddr
, i
, buf
);
1727 /* Clear the spin-lock. */
1729 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1730 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1731 memcpy (buf
+ i
, &lockaddr
, 8);
1733 append_insns (&buildaddr
, i
, buf
);
1735 /* Remove stack that had been used for the collect_t object. */
1737 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1738 append_insns (&buildaddr
, i
, buf
);
1740 /* Restore register state. */
/* Pops mirror the pushes above, in reverse order.  */
1742 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1746 buf
[i
++] = 0x9d; /* popfq */
1747 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1748 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1749 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1750 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1751 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1752 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1753 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1754 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1755 buf
[i
++] = 0x58; /* pop %rax */
1756 buf
[i
++] = 0x5b; /* pop %rbx */
1757 buf
[i
++] = 0x59; /* pop %rcx */
1758 buf
[i
++] = 0x5a; /* pop %rdx */
1759 buf
[i
++] = 0x5e; /* pop %rsi */
1760 buf
[i
++] = 0x5f; /* pop %rdi */
1761 buf
[i
++] = 0x5d; /* pop %rbp */
1762 buf
[i
++] = 0x5c; /* pop %rsp */
1763 append_insns (&buildaddr
, i
, buf
);
1765 /* Now, adjust the original instruction to execute in the jump
1767 *adjusted_insn_addr
= buildaddr
;
1768 relocate_instruction (&buildaddr
, tpaddr
);
1769 *adjusted_insn_addr_end
= buildaddr
;
1771 /* Finally, write a jump back to the program. */
1773 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
/* Guard against a displacement that does not fit the jmp rel32.  */
1774 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1777 "E.Jump back from jump pad too far from tracepoint "
1778 "(offset 0x%" PRIx64
" > int32).", loffset
);
1782 offset
= (int) loffset
;
1783 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1784 memcpy (buf
+ 1, &offset
, 4);
1785 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1787 /* The jump pad is now built. Wire in a jump to our jump pad. This
1788 is always done last (by our caller actually), so that we can
1789 install fast tracepoints with threads running. This relies on
1790 the agent's atomic write support. */
1791 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1792 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1795 "E.Jump pad too far from tracepoint "
1796 "(offset 0x%" PRIx64
" > int32).", loffset
);
1800 offset
= (int) loffset
;
1802 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1803 memcpy (buf
+ 1, &offset
, 4);
1804 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1805 *jjump_pad_insn_size
= sizeof (jump_insn
);
1807 /* Return the end address of our pad. */
1808 *jump_entry
= buildaddr
;
1813 #endif /* __x86_64__ */
1815 /* Build a jump pad that saves registers and calls a collection
1816 function. Writes a jump instruction to the jump pad to
1817 JJUMPAD_INSN. The caller is responsible to write it in at the
1818 tracepoint address. */
/* NOTE(review): 32-bit counterpart of the amd64 jump-pad builder.
   Extraction dropped several lines (LOCKADDR/ORIG_SIZE parameters,
   `int i = 0;`, jump_insn/small_jump_insn declarations, the if/else
   around the trampoline path, closing braces).  Text kept verbatim.  */
1821 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1822 CORE_ADDR collector
,
1825 CORE_ADDR
*jump_entry
,
1826 CORE_ADDR
*trampoline
,
1827 ULONGEST
*trampoline_size
,
1828 unsigned char *jjump_pad_insn
,
1829 ULONGEST
*jjump_pad_insn_size
,
1830 CORE_ADDR
*adjusted_insn_addr
,
1831 CORE_ADDR
*adjusted_insn_addr_end
,
1834 unsigned char buf
[0x100];
1836 CORE_ADDR buildaddr
= *jump_entry
;
1838 /* Build the jump pad. */
1840 /* First, do tracepoint data collection. Save registers. */
1842 buf
[i
++] = 0x60; /* pushad */
1843 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1844 *((int *)(buf
+ i
)) = (int) tpaddr
;
1846 buf
[i
++] = 0x9c; /* pushf */
1847 buf
[i
++] = 0x1e; /* push %ds */
1848 buf
[i
++] = 0x06; /* push %es */
1849 buf
[i
++] = 0x0f; /* push %fs */
1851 buf
[i
++] = 0x0f; /* push %gs */
1853 buf
[i
++] = 0x16; /* push %ss */
1854 buf
[i
++] = 0x0e; /* push %cs */
1855 append_insns (&buildaddr
, i
, buf
);
1857 /* Stack space for the collecting_t object. */
1859 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1861 /* Build the object. */
1862 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1863 memcpy (buf
+ i
, &tpoint
, 4);
1865 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1867 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1868 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1869 append_insns (&buildaddr
, i
, buf
);
1871 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1872 If we cared for it, this could be using xchg alternatively. */
1875 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1876 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1878 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1880 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1881 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1882 append_insns (&buildaddr
, i
, buf
);
1885 /* Set up arguments to the gdb_collect call. */
1887 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1888 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1889 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1890 append_insns (&buildaddr
, i
, buf
);
1893 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1894 append_insns (&buildaddr
, i
, buf
);
1897 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1898 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1900 append_insns (&buildaddr
, i
, buf
);
/* Direct rel32 call to the collector.  */
1902 buf
[0] = 0xe8; /* call <reladdr> */
1903 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1904 memcpy (buf
+ 1, &offset
, 4);
1905 append_insns (&buildaddr
, 5, buf
);
1906 /* Clean up after the call. */
1907 buf
[0] = 0x83; /* add $0x8,%esp */
1910 append_insns (&buildaddr
, 3, buf
);
1913 /* Clear the spin-lock. This would need the LOCK prefix on older
1916 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1917 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1918 memcpy (buf
+ i
, &lockaddr
, 4);
1920 append_insns (&buildaddr
, i
, buf
);
1923 /* Remove stack that had been used for the collect_t object. */
1925 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1926 append_insns (&buildaddr
, i
, buf
);
/* Restore segment registers/flags/GPRs in reverse push order.  */
1929 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1932 buf
[i
++] = 0x17; /* pop %ss */
1933 buf
[i
++] = 0x0f; /* pop %gs */
1935 buf
[i
++] = 0x0f; /* pop %fs */
1937 buf
[i
++] = 0x07; /* pop %es */
1938 buf
[i
++] = 0x1f; /* pop %ds */
1939 buf
[i
++] = 0x9d; /* popf */
1940 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1943 buf
[i
++] = 0x61; /* popad */
1944 append_insns (&buildaddr
, i
, buf
);
1946 /* Now, adjust the original instruction to execute in the jump
1948 *adjusted_insn_addr
= buildaddr
;
1949 relocate_instruction (&buildaddr
, tpaddr
);
1950 *adjusted_insn_addr_end
= buildaddr
;
1952 /* Write the jump back to the program. */
1953 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1954 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1955 memcpy (buf
+ 1, &offset
, 4);
1956 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1958 /* The jump pad is now built. Wire in a jump to our jump pad. This
1959 is always done last (by our caller actually), so that we can
1960 install fast tracepoints with threads running. This relies on
1961 the agent's atomic write support. */
1964 /* Create a trampoline. */
1965 *trampoline_size
= sizeof (jump_insn
);
1966 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1968 /* No trampoline space available. */
1970 "E.Cannot allocate trampoline space needed for fast "
1971 "tracepoints on 4-byte instructions.");
1975 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1976 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1977 memcpy (buf
+ 1, &offset
, 4);
1978 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1980 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1981 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1982 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1983 memcpy (buf
+ 2, &offset
, 2);
1984 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1985 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1989 /* Else use a 32-bit relative jump instruction. */
1990 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1991 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1992 memcpy (buf
+ 1, &offset
, 4);
1993 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1994 *jjump_pad_insn_size
= sizeof (jump_insn
);
1997 /* Return the end address of our pad. */
1998 *jump_entry
= buildaddr
;
/* ABI dispatcher: forward to the amd64 or i386 jump-pad builder
   depending on the current target description.  NOTE(review):
   extraction dropped the LOCKADDR/ORIG_SIZE parameters and some
   argument lines of both forwarded calls.  */
2004 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2005 CORE_ADDR collector
,
2008 CORE_ADDR
*jump_entry
,
2009 CORE_ADDR
*trampoline
,
2010 ULONGEST
*trampoline_size
,
2011 unsigned char *jjump_pad_insn
,
2012 ULONGEST
*jjump_pad_insn_size
,
2013 CORE_ADDR
*adjusted_insn_addr
,
2014 CORE_ADDR
*adjusted_insn_addr_end
,
2018 if (is_64bit_tdesc ())
2019 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2020 collector
, lockaddr
,
2021 orig_size
, jump_entry
,
2022 trampoline
, trampoline_size
,
2024 jjump_pad_insn_size
,
2026 adjusted_insn_addr_end
,
2030 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2031 collector
, lockaddr
,
2032 orig_size
, jump_entry
,
2033 trampoline
, trampoline_size
,
2035 jjump_pad_insn_size
,
2037 adjusted_insn_addr_end
,
2041 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
/* NOTE(review): the literal return statements (4/5/-1) were dropped by
   extraction; only the control flow and the warning remain visible.  */
2045 x86_get_min_fast_tracepoint_insn_len (void)
/* Warn only once per gdbserver session.  */
2047 static int warned_about_fast_tracepoints
= 0;
2050 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2051 used for fast tracepoints. */
2052 if (is_64bit_tdesc ())
2056 if (agent_loaded_p ())
2058 char errbuf
[IPA_BUFSIZ
];
2062 /* On x86, if trampolines are available, then 4-byte jump instructions
2063 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2064 with a 4-byte offset are used instead. */
2065 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2069 /* GDB has no channel to explain to user why a shorter fast
2070 tracepoint is not possible, but at least make GDBserver
2071 mention that something has gone awry. */
2072 if (!warned_about_fast_tracepoints
)
2074 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2075 warned_about_fast_tracepoints
= 1;
2082 /* Indicate that the minimum length is currently unknown since the IPA
2083 has not loaded yet. */
/* Append LEN raw instruction bytes from START at the current
   bytecode-compilation pointer, then advance that pointer.  */
2089 add_insns (unsigned char *start
, int len
)
2091 CORE_ADDR buildaddr
= current_insn_ptr
;
2094 debug_printf ("Adding %d bytes of insn at %s\n",
2095 len
, paddress (buildaddr
));
2097 append_insns (&buildaddr
, len
, start
);
2098 current_insn_ptr
= buildaddr
;
/* NOTE(review): the EMIT_ASM/EMIT_ASM32 macro bodies below lost some
   continuation lines in extraction (the do/while wrapper, INSNS
   insertion, #ifdef __x86_64__ / #else / #endif).  No comments are
   inserted between the surviving backslash-continued lines.  */
2101 /* Our general strategy for emitting code is to avoid specifying raw
2102 bytes whenever possible, and instead copy a block of inline asm
2103 that is embedded in the function. This is a little messy, because
2104 we need to keep the compiler from discarding what looks like dead
2105 code, plus suppress various warnings. */
2107 #define EMIT_ASM(NAME, INSNS) \
2110 extern unsigned char start_ ## NAME, end_ ## NAME; \
2111 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2112 __asm__ ("jmp end_" #NAME "\n" \
2113 "\t" "start_" #NAME ":" \
2115 "\t" "end_" #NAME ":"); \
2120 #define EMIT_ASM32(NAME,INSNS) \
2123 extern unsigned char start_ ## NAME, end_ ## NAME; \
2124 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2125 __asm__ (".code32\n" \
2126 "\t" "jmp end_" #NAME "\n" \
2127 "\t" "start_" #NAME ":\n" \
2129 "\t" "end_" #NAME ":\n" \
2135 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
/* NOTE(review): amd64 agent-expression compiler helpers (prologue
   through write_goto_address).  Each emits asm via EMIT_ASM; extraction
   dropped many EMIT_ASM body lines, braces and return types.  Text
   kept verbatim; only this header comment is added.  */
2142 amd64_emit_prologue (void)
2144 EMIT_ASM (amd64_prologue
,
2146 "movq %rsp,%rbp\n\t"
2147 "sub $0x20,%rsp\n\t"
2148 "movq %rdi,-8(%rbp)\n\t"
2149 "movq %rsi,-16(%rbp)");
2154 amd64_emit_epilogue (void)
2156 EMIT_ASM (amd64_epilogue
,
2157 "movq -16(%rbp),%rdi\n\t"
2158 "movq %rax,(%rdi)\n\t"
2165 amd64_emit_add (void)
2167 EMIT_ASM (amd64_add
,
2168 "add (%rsp),%rax\n\t"
2169 "lea 0x8(%rsp),%rsp");
2173 amd64_emit_sub (void)
2175 EMIT_ASM (amd64_sub
,
2176 "sub %rax,(%rsp)\n\t"
2181 amd64_emit_mul (void)
2187 amd64_emit_lsh (void)
2193 amd64_emit_rsh_signed (void)
2199 amd64_emit_rsh_unsigned (void)
2205 amd64_emit_ext (int arg
)
2210 EMIT_ASM (amd64_ext_8
,
2216 EMIT_ASM (amd64_ext_16
,
2221 EMIT_ASM (amd64_ext_32
,
2230 amd64_emit_log_not (void)
2232 EMIT_ASM (amd64_log_not
,
2233 "test %rax,%rax\n\t"
2239 amd64_emit_bit_and (void)
2241 EMIT_ASM (amd64_and
,
2242 "and (%rsp),%rax\n\t"
2243 "lea 0x8(%rsp),%rsp");
2247 amd64_emit_bit_or (void)
2250 "or (%rsp),%rax\n\t"
2251 "lea 0x8(%rsp),%rsp");
2255 amd64_emit_bit_xor (void)
2257 EMIT_ASM (amd64_xor
,
2258 "xor (%rsp),%rax\n\t"
2259 "lea 0x8(%rsp),%rsp");
2263 amd64_emit_bit_not (void)
2265 EMIT_ASM (amd64_bit_not
,
2266 "xorq $0xffffffffffffffff,%rax");
2270 amd64_emit_equal (void)
2272 EMIT_ASM (amd64_equal
,
2273 "cmp %rax,(%rsp)\n\t"
2274 "je .Lamd64_equal_true\n\t"
2276 "jmp .Lamd64_equal_end\n\t"
2277 ".Lamd64_equal_true:\n\t"
2279 ".Lamd64_equal_end:\n\t"
2280 "lea 0x8(%rsp),%rsp");
2284 amd64_emit_less_signed (void)
2286 EMIT_ASM (amd64_less_signed
,
2287 "cmp %rax,(%rsp)\n\t"
2288 "jl .Lamd64_less_signed_true\n\t"
2290 "jmp .Lamd64_less_signed_end\n\t"
2291 ".Lamd64_less_signed_true:\n\t"
2293 ".Lamd64_less_signed_end:\n\t"
2294 "lea 0x8(%rsp),%rsp");
2298 amd64_emit_less_unsigned (void)
2300 EMIT_ASM (amd64_less_unsigned
,
2301 "cmp %rax,(%rsp)\n\t"
2302 "jb .Lamd64_less_unsigned_true\n\t"
2304 "jmp .Lamd64_less_unsigned_end\n\t"
2305 ".Lamd64_less_unsigned_true:\n\t"
2307 ".Lamd64_less_unsigned_end:\n\t"
2308 "lea 0x8(%rsp),%rsp");
2312 amd64_emit_ref (int size
)
2317 EMIT_ASM (amd64_ref1
,
2321 EMIT_ASM (amd64_ref2
,
2325 EMIT_ASM (amd64_ref4
,
2326 "movl (%rax),%eax");
2329 EMIT_ASM (amd64_ref8
,
2330 "movq (%rax),%rax");
2336 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2338 EMIT_ASM (amd64_if_goto
,
2342 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2350 amd64_emit_goto (int *offset_p
, int *size_p
)
2352 EMIT_ASM (amd64_goto
,
2353 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch the rel32 displacement of a previously emitted jump.  */
2361 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2363 int diff
= (to
- (from
+ size
));
2364 unsigned char buf
[sizeof (int)];
2372 memcpy (buf
, &diff
, sizeof (int));
2373 write_inferior_memory (from
, buf
, sizeof (int));
/* NOTE(review): amd64 emitters that assemble raw bytes by hand
   (const/call/reg/stack helpers).  Extraction dropped `int i = 0;`
   declarations, if/else braces and return types throughout.  */
2377 amd64_emit_const (LONGEST num
)
2379 unsigned char buf
[16];
2381 CORE_ADDR buildaddr
= current_insn_ptr
;
2384 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2385 memcpy (&buf
[i
], &num
, sizeof (num
));
2387 append_insns (&buildaddr
, i
, buf
);
2388 current_insn_ptr
= buildaddr
;
2392 amd64_emit_call (CORE_ADDR fn
)
2394 unsigned char buf
[16];
2396 CORE_ADDR buildaddr
;
2399 /* The destination function being in the shared library, may be
2400 >31-bits away off the compiled code pad. */
2402 buildaddr
= current_insn_ptr
;
2404 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2408 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2410 /* Offset is too large for a call. Use callq, but that requires
2411 a register, so avoid it if possible. Use r10, since it is
2412 call-clobbered, we don't have to push/pop it. */
2413 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2415 memcpy (buf
+ i
, &fn
, 8);
2417 buf
[i
++] = 0xff; /* callq *%r10 */
2422 int offset32
= offset64
; /* we know we can't overflow here. */
2423 memcpy (buf
+ i
, &offset32
, 4);
2427 append_insns (&buildaddr
, i
, buf
);
2428 current_insn_ptr
= buildaddr
;
2432 amd64_emit_reg (int reg
)
2434 unsigned char buf
[16];
2436 CORE_ADDR buildaddr
;
2438 /* Assume raw_regs is still in %rdi. */
2439 buildaddr
= current_insn_ptr
;
2441 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2442 memcpy (&buf
[i
], &reg
, sizeof (reg
));
2444 append_insns (&buildaddr
, i
, buf
);
2445 current_insn_ptr
= buildaddr
;
2446 amd64_emit_call (get_raw_reg_func_addr ());
2450 amd64_emit_pop (void)
2452 EMIT_ASM (amd64_pop
,
2457 amd64_emit_stack_flush (void)
2459 EMIT_ASM (amd64_stack_flush
,
2464 amd64_emit_zero_ext (int arg
)
2469 EMIT_ASM (amd64_zero_ext_8
,
2473 EMIT_ASM (amd64_zero_ext_16
,
2474 "and $0xffff,%rax");
2477 EMIT_ASM (amd64_zero_ext_32
,
2478 "mov $0xffffffff,%rcx\n\t"
2487 amd64_emit_swap (void)
2489 EMIT_ASM (amd64_swap
,
2496 amd64_emit_stack_adjust (int n
)
2498 unsigned char buf
[16];
2500 CORE_ADDR buildaddr
= current_insn_ptr
;
2503 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2507 /* This only handles adjustments up to 16, but we don't expect any more. */
2509 append_insns (&buildaddr
, i
, buf
);
2510 current_insn_ptr
= buildaddr
;
2513 /* FN's prototype is `LONGEST(*fn)(int)'. */
2516 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2518 unsigned char buf
[16];
2520 CORE_ADDR buildaddr
;
2522 buildaddr
= current_insn_ptr
;
2524 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2525 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2527 append_insns (&buildaddr
, i
, buf
);
2528 current_insn_ptr
= buildaddr
;
2529 amd64_emit_call (fn
);
2532 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2535 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2537 unsigned char buf
[16];
2539 CORE_ADDR buildaddr
;
2541 buildaddr
= current_insn_ptr
;
2543 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2544 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2546 append_insns (&buildaddr
, i
, buf
);
2547 current_insn_ptr
= buildaddr
;
2548 EMIT_ASM (amd64_void_call_2_a
,
2549 /* Save away a copy of the stack top. */
2551 /* Also pass top as the second argument. */
2553 amd64_emit_call (fn
);
2554 EMIT_ASM (amd64_void_call_2_b
,
2555 /* Restore the stack top, %rax may have been trashed. */
/* NOTE(review): amd64 conditional-goto emitters plus the emit_ops
   vtable.  EMIT_ASM invocations and many struct initializer fields
   were dropped by extraction; text kept verbatim.  */
2560 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2563 "cmp %rax,(%rsp)\n\t"
2564 "jne .Lamd64_eq_fallthru\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2567 /* jmp, but don't trust the assembler to choose the right jump */
2568 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2569 ".Lamd64_eq_fallthru:\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2580 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2583 "cmp %rax,(%rsp)\n\t"
2584 "je .Lamd64_ne_fallthru\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2587 /* jmp, but don't trust the assembler to choose the right jump */
2588 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2589 ".Lamd64_ne_fallthru:\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2600 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2603 "cmp %rax,(%rsp)\n\t"
2604 "jnl .Lamd64_lt_fallthru\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2607 /* jmp, but don't trust the assembler to choose the right jump */
2608 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2609 ".Lamd64_lt_fallthru:\n\t"
2610 "lea 0x8(%rsp),%rsp\n\t"
2620 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2623 "cmp %rax,(%rsp)\n\t"
2624 "jnle .Lamd64_le_fallthru\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2627 /* jmp, but don't trust the assembler to choose the right jump */
2628 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2629 ".Lamd64_le_fallthru:\n\t"
2630 "lea 0x8(%rsp),%rsp\n\t"
2640 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2643 "cmp %rax,(%rsp)\n\t"
2644 "jng .Lamd64_gt_fallthru\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2647 /* jmp, but don't trust the assembler to choose the right jump */
2648 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2649 ".Lamd64_gt_fallthru:\n\t"
2650 "lea 0x8(%rsp),%rsp\n\t"
2660 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2663 "cmp %rax,(%rsp)\n\t"
2664 "jnge .Lamd64_ge_fallthru\n\t"
2665 ".Lamd64_ge_jump:\n\t"
2666 "lea 0x8(%rsp),%rsp\n\t"
2668 /* jmp, but don't trust the assembler to choose the right jump */
2669 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2670 ".Lamd64_ge_fallthru:\n\t"
2671 "lea 0x8(%rsp),%rsp\n\t"
/* Function-pointer table consumed by the agent-expression compiler.  */
2680 struct emit_ops amd64_emit_ops
=
2682 amd64_emit_prologue
,
2683 amd64_emit_epilogue
,
2688 amd64_emit_rsh_signed
,
2689 amd64_emit_rsh_unsigned
,
2697 amd64_emit_less_signed
,
2698 amd64_emit_less_unsigned
,
2702 amd64_write_goto_address
,
2707 amd64_emit_stack_flush
,
2708 amd64_emit_zero_ext
,
2710 amd64_emit_stack_adjust
,
2711 amd64_emit_int_call_1
,
2712 amd64_emit_void_call_2
,
2721 #endif /* __x86_64__ */
/* NOTE(review): i386 agent-expression compiler helpers; 64-bit values
   live in the %eax/%ebx pair here.  Extraction dropped many EMIT_ASM32
   body lines, braces and return types.  Text kept verbatim.  */
2724 i386_emit_prologue (void)
2726 EMIT_ASM32 (i386_prologue
,
2730 /* At this point, the raw regs base address is at 8(%ebp), and the
2731 value pointer is at 12(%ebp). */
2735 i386_emit_epilogue (void)
2737 EMIT_ASM32 (i386_epilogue
,
2738 "mov 12(%ebp),%ecx\n\t"
2739 "mov %eax,(%ecx)\n\t"
2740 "mov %ebx,0x4(%ecx)\n\t"
2748 i386_emit_add (void)
2750 EMIT_ASM32 (i386_add
,
2751 "add (%esp),%eax\n\t"
2752 "adc 0x4(%esp),%ebx\n\t"
2753 "lea 0x8(%esp),%esp");
2757 i386_emit_sub (void)
2759 EMIT_ASM32 (i386_sub
,
2760 "subl %eax,(%esp)\n\t"
2761 "sbbl %ebx,4(%esp)\n\t"
2767 i386_emit_mul (void)
2773 i386_emit_lsh (void)
2779 i386_emit_rsh_signed (void)
2785 i386_emit_rsh_unsigned (void)
2791 i386_emit_ext (int arg
)
2796 EMIT_ASM32 (i386_ext_8
,
2799 "movl %eax,%ebx\n\t"
2803 EMIT_ASM32 (i386_ext_16
,
2805 "movl %eax,%ebx\n\t"
2809 EMIT_ASM32 (i386_ext_32
,
2810 "movl %eax,%ebx\n\t"
2819 i386_emit_log_not (void)
2821 EMIT_ASM32 (i386_log_not
,
2823 "test %eax,%eax\n\t"
2830 i386_emit_bit_and (void)
2832 EMIT_ASM32 (i386_and
,
2833 "and (%esp),%eax\n\t"
2834 "and 0x4(%esp),%ebx\n\t"
2835 "lea 0x8(%esp),%esp");
2839 i386_emit_bit_or (void)
2841 EMIT_ASM32 (i386_or
,
2842 "or (%esp),%eax\n\t"
2843 "or 0x4(%esp),%ebx\n\t"
2844 "lea 0x8(%esp),%esp");
2848 i386_emit_bit_xor (void)
2850 EMIT_ASM32 (i386_xor
,
2851 "xor (%esp),%eax\n\t"
2852 "xor 0x4(%esp),%ebx\n\t"
2853 "lea 0x8(%esp),%esp");
2857 i386_emit_bit_not (void)
2859 EMIT_ASM32 (i386_bit_not
,
2860 "xor $0xffffffff,%eax\n\t"
2861 "xor $0xffffffff,%ebx\n\t");
2865 i386_emit_equal (void)
2867 EMIT_ASM32 (i386_equal
,
2868 "cmpl %ebx,4(%esp)\n\t"
2869 "jne .Li386_equal_false\n\t"
2870 "cmpl %eax,(%esp)\n\t"
2871 "je .Li386_equal_true\n\t"
2872 ".Li386_equal_false:\n\t"
2874 "jmp .Li386_equal_end\n\t"
2875 ".Li386_equal_true:\n\t"
2877 ".Li386_equal_end:\n\t"
2879 "lea 0x8(%esp),%esp");
2883 i386_emit_less_signed (void)
2885 EMIT_ASM32 (i386_less_signed
,
2886 "cmpl %ebx,4(%esp)\n\t"
2887 "jl .Li386_less_signed_true\n\t"
2888 "jne .Li386_less_signed_false\n\t"
2889 "cmpl %eax,(%esp)\n\t"
2890 "jl .Li386_less_signed_true\n\t"
2891 ".Li386_less_signed_false:\n\t"
2893 "jmp .Li386_less_signed_end\n\t"
2894 ".Li386_less_signed_true:\n\t"
2896 ".Li386_less_signed_end:\n\t"
2898 "lea 0x8(%esp),%esp");
2902 i386_emit_less_unsigned (void)
2904 EMIT_ASM32 (i386_less_unsigned
,
2905 "cmpl %ebx,4(%esp)\n\t"
2906 "jb .Li386_less_unsigned_true\n\t"
2907 "jne .Li386_less_unsigned_false\n\t"
2908 "cmpl %eax,(%esp)\n\t"
2909 "jb .Li386_less_unsigned_true\n\t"
2910 ".Li386_less_unsigned_false:\n\t"
2912 "jmp .Li386_less_unsigned_end\n\t"
2913 ".Li386_less_unsigned_true:\n\t"
2915 ".Li386_less_unsigned_end:\n\t"
2917 "lea 0x8(%esp),%esp");
2921 i386_emit_ref (int size
)
2926 EMIT_ASM32 (i386_ref1
,
2930 EMIT_ASM32 (i386_ref2
,
2934 EMIT_ASM32 (i386_ref4
,
2935 "movl (%eax),%eax");
2938 EMIT_ASM32 (i386_ref8
,
2939 "movl 4(%eax),%ebx\n\t"
2940 "movl (%eax),%eax");
2946 i386_emit_if_goto (int *offset_p
, int *size_p
)
2948 EMIT_ASM32 (i386_if_goto
,
2954 /* Don't trust the assembler to choose the right jump */
2955 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2958 *offset_p
= 11; /* be sure that this matches the sequence above */
2964 i386_emit_goto (int *offset_p
, int *size_p
)
2966 EMIT_ASM32 (i386_goto
,
2967 /* Don't trust the assembler to choose the right jump */
2968 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch the rel32 displacement of a previously emitted jump.  */
2976 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2978 int diff
= (to
- (from
+ size
));
2979 unsigned char buf
[sizeof (int)];
2981 /* We're only doing 4-byte sizes at the moment. */
2988 memcpy (buf
, &diff
, sizeof (int));
2989 write_inferior_memory (from
, buf
, sizeof (int));
/* NOTE(review): i386 emitters assembling raw bytes by hand
   (const/call/reg/stack helpers).  Extraction dropped `int i = 0;`
   declarations, lo/hi declarations, braces and return types.  */
2993 i386_emit_const (LONGEST num
)
2995 unsigned char buf
[16];
2997 CORE_ADDR buildaddr
= current_insn_ptr
;
3000 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3001 lo
= num
& 0xffffffff;
3002 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3004 hi
= ((num
>> 32) & 0xffffffff);
3007 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3008 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3013 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3015 append_insns (&buildaddr
, i
, buf
);
3016 current_insn_ptr
= buildaddr
;
3020 i386_emit_call (CORE_ADDR fn
)
3022 unsigned char buf
[16];
3024 CORE_ADDR buildaddr
;
3026 buildaddr
= current_insn_ptr
;
3028 buf
[i
++] = 0xe8; /* call <reladdr> */
3029 offset
= ((int) fn
) - (buildaddr
+ 5);
3030 memcpy (buf
+ 1, &offset
, 4);
3031 append_insns (&buildaddr
, 5, buf
);
3032 current_insn_ptr
= buildaddr
;
3036 i386_emit_reg (int reg
)
3038 unsigned char buf
[16];
3040 CORE_ADDR buildaddr
;
3042 EMIT_ASM32 (i386_reg_a
,
3044 buildaddr
= current_insn_ptr
;
3046 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3047 memcpy (&buf
[i
], &reg
, sizeof (reg
));
3049 append_insns (&buildaddr
, i
, buf
);
3050 current_insn_ptr
= buildaddr
;
3051 EMIT_ASM32 (i386_reg_b
,
3052 "mov %eax,4(%esp)\n\t"
3053 "mov 8(%ebp),%eax\n\t"
3055 i386_emit_call (get_raw_reg_func_addr ());
3056 EMIT_ASM32 (i386_reg_c
,
3058 "lea 0x8(%esp),%esp");
3062 i386_emit_pop (void)
3064 EMIT_ASM32 (i386_pop
,
3070 i386_emit_stack_flush (void)
3072 EMIT_ASM32 (i386_stack_flush
,
3078 i386_emit_zero_ext (int arg
)
3083 EMIT_ASM32 (i386_zero_ext_8
,
3084 "and $0xff,%eax\n\t"
3088 EMIT_ASM32 (i386_zero_ext_16
,
3089 "and $0xffff,%eax\n\t"
3093 EMIT_ASM32 (i386_zero_ext_32
,
3102 i386_emit_swap (void)
3104 EMIT_ASM32 (i386_swap
,
3114 i386_emit_stack_adjust (int n
)
3116 unsigned char buf
[16];
3118 CORE_ADDR buildaddr
= current_insn_ptr
;
3121 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3125 append_insns (&buildaddr
, i
, buf
);
3126 current_insn_ptr
= buildaddr
;
3129 /* FN's prototype is `LONGEST(*fn)(int)'. */
3132 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3134 unsigned char buf
[16];
3136 CORE_ADDR buildaddr
;
3138 EMIT_ASM32 (i386_int_call_1_a
,
3139 /* Reserve a bit of stack space. */
3141 /* Put the one argument on the stack. */
3142 buildaddr
= current_insn_ptr
;
3144 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3147 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3149 append_insns (&buildaddr
, i
, buf
);
3150 current_insn_ptr
= buildaddr
;
3151 i386_emit_call (fn
);
3152 EMIT_ASM32 (i386_int_call_1_c
,
3154 "lea 0x8(%esp),%esp");
3157 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3160 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3162 unsigned char buf
[16];
3164 CORE_ADDR buildaddr
;
3166 EMIT_ASM32 (i386_void_call_2_a
,
3167 /* Preserve %eax only; we don't have to worry about %ebx. */
3169 /* Reserve a bit of stack space for arguments. */
3170 "sub $0x10,%esp\n\t"
3171 /* Copy "top" to the second argument position. (Note that
3172 we can't assume function won't scribble on its
3173 arguments, so don't try to restore from this.) */
3174 "mov %eax,4(%esp)\n\t"
3175 "mov %ebx,8(%esp)");
3176 /* Put the first argument on the stack. */
3177 buildaddr
= current_insn_ptr
;
3179 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3182 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3184 append_insns (&buildaddr
, i
, buf
);
3185 current_insn_ptr
= buildaddr
;
3186 i386_emit_call (fn
);
3187 EMIT_ASM32 (i386_void_call_2_b
,
3188 "lea 0x10(%esp),%esp\n\t"
3189 /* Restore original stack top. */
3195 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3198 /* Check low half first, more likely to be decider */
3199 "cmpl %eax,(%esp)\n\t"
3200 "jne .Leq_fallthru\n\t"
3201 "cmpl %ebx,4(%esp)\n\t"
3202 "jne .Leq_fallthru\n\t"
3203 "lea 0x8(%esp),%esp\n\t"
3206 /* jmp, but don't trust the assembler to choose the right jump */
3207 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3208 ".Leq_fallthru:\n\t"
3209 "lea 0x8(%esp),%esp\n\t"
3220 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3223 /* Check low half first, more likely to be decider */
3224 "cmpl %eax,(%esp)\n\t"
3226 "cmpl %ebx,4(%esp)\n\t"
3227 "je .Lne_fallthru\n\t"
3229 "lea 0x8(%esp),%esp\n\t"
3232 /* jmp, but don't trust the assembler to choose the right jump */
3233 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3234 ".Lne_fallthru:\n\t"
3235 "lea 0x8(%esp),%esp\n\t"
3246 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3249 "cmpl %ebx,4(%esp)\n\t"
3251 "jne .Llt_fallthru\n\t"
3252 "cmpl %eax,(%esp)\n\t"
3253 "jnl .Llt_fallthru\n\t"
3255 "lea 0x8(%esp),%esp\n\t"
3258 /* jmp, but don't trust the assembler to choose the right jump */
3259 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3260 ".Llt_fallthru:\n\t"
3261 "lea 0x8(%esp),%esp\n\t"
3272 i386_emit_le_goto (int *offset_p
, int *size_p
)
3275 "cmpl %ebx,4(%esp)\n\t"
3277 "jne .Lle_fallthru\n\t"
3278 "cmpl %eax,(%esp)\n\t"
3279 "jnle .Lle_fallthru\n\t"
3281 "lea 0x8(%esp),%esp\n\t"
3284 /* jmp, but don't trust the assembler to choose the right jump */
3285 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3286 ".Lle_fallthru:\n\t"
3287 "lea 0x8(%esp),%esp\n\t"
3298 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3301 "cmpl %ebx,4(%esp)\n\t"
3303 "jne .Lgt_fallthru\n\t"
3304 "cmpl %eax,(%esp)\n\t"
3305 "jng .Lgt_fallthru\n\t"
3307 "lea 0x8(%esp),%esp\n\t"
3310 /* jmp, but don't trust the assembler to choose the right jump */
3311 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3312 ".Lgt_fallthru:\n\t"
3313 "lea 0x8(%esp),%esp\n\t"
3324 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3327 "cmpl %ebx,4(%esp)\n\t"
3329 "jne .Lge_fallthru\n\t"
3330 "cmpl %eax,(%esp)\n\t"
3331 "jnge .Lge_fallthru\n\t"
3333 "lea 0x8(%esp),%esp\n\t"
3336 /* jmp, but don't trust the assembler to choose the right jump */
3337 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3338 ".Lge_fallthru:\n\t"
3339 "lea 0x8(%esp),%esp\n\t"
3349 struct emit_ops i386_emit_ops
=
3357 i386_emit_rsh_signed
,
3358 i386_emit_rsh_unsigned
,
3366 i386_emit_less_signed
,
3367 i386_emit_less_unsigned
,
3371 i386_write_goto_address
,
3376 i386_emit_stack_flush
,
3379 i386_emit_stack_adjust
,
3380 i386_emit_int_call_1
,
3381 i386_emit_void_call_2
,
3391 static struct emit_ops
*
3395 if (is_64bit_tdesc ())
3396 return &amd64_emit_ops
;
3399 return &i386_emit_ops
;
/* Range stepping (vCont;r) is supported on x86/x86-64.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3408 /* This is initialized assuming an amd64 target.
3409 x86_arch_setup will correct it for i386 or amd64 targets. */
3411 struct linux_target_ops the_low_target
=
3414 x86_linux_regs_info
,
3415 x86_cannot_fetch_register
,
3416 x86_cannot_store_register
,
3417 NULL
, /* fetch_register */
3425 x86_supports_z_point_type
,
3428 x86_stopped_by_watchpoint
,
3429 x86_stopped_data_address
,
3430 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3431 native i386 case (no registers smaller than an xfer unit), and are not
3432 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3435 /* need to fix up i386 siginfo if host is amd64 */
3437 x86_linux_new_process
,
3438 x86_linux_new_thread
,
3439 x86_linux_prepare_to_resume
,
3440 x86_linux_process_qsupported
,
3441 x86_supports_tracepoints
,
3442 x86_get_thread_area
,
3443 x86_install_fast_tracepoint_jump_pad
,
3445 x86_get_min_fast_tracepoint_insn_len
,
3446 x86_supports_range_stepping
,
3450 initialize_low_arch (void)
3452 /* Initialize the Linux target descriptions. */
3454 init_registers_amd64_linux ();
3455 init_registers_amd64_avx_linux ();
3456 init_registers_amd64_avx512_linux ();
3457 init_registers_amd64_mpx_linux ();
3459 init_registers_x32_linux ();
3460 init_registers_x32_avx_linux ();
3461 init_registers_x32_avx512_linux ();
3463 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3464 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3465 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3467 init_registers_i386_linux ();
3468 init_registers_i386_mmx_linux ();
3469 init_registers_i386_avx_linux ();
3470 init_registers_i386_avx512_linux ();
3471 init_registers_i386_mpx_linux ();
3473 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3474 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3475 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3477 initialize_regsets_info (&x86_regsets_info
);