1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-avx512-linux.c. */
52 void init_registers_amd64_avx512_linux (void);
53 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
55 /* Defined in auto-generated file amd64-mpx-linux.c. */
56 void init_registers_amd64_mpx_linux (void);
57 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
59 /* Defined in auto-generated file x32-linux.c. */
60 void init_registers_x32_linux (void);
61 extern const struct target_desc
*tdesc_x32_linux
;
63 /* Defined in auto-generated file x32-avx-linux.c. */
64 void init_registers_x32_avx_linux (void);
65 extern const struct target_desc
*tdesc_x32_avx_linux
;
67 /* Defined in auto-generated file x32-avx512-linux.c. */
68 void init_registers_x32_avx512_linux (void);
69 extern const struct target_desc
*tdesc_x32_avx512_linux
;
73 /* Defined in auto-generated file i386-linux.c. */
74 void init_registers_i386_linux (void);
75 extern const struct target_desc
*tdesc_i386_linux
;
77 /* Defined in auto-generated file i386-mmx-linux.c. */
78 void init_registers_i386_mmx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mmx_linux
;
81 /* Defined in auto-generated file i386-avx-linux.c. */
82 void init_registers_i386_avx_linux (void);
83 extern const struct target_desc
*tdesc_i386_avx_linux
;
85 /* Defined in auto-generated file i386-avx512-linux.c. */
86 void init_registers_i386_avx512_linux (void);
87 extern const struct target_desc
*tdesc_i386_avx512_linux
;
89 /* Defined in auto-generated file i386-mpx-linux.c. */
90 void init_registers_i386_mpx_linux (void);
91 extern const struct target_desc
*tdesc_i386_mpx_linux
;
94 static struct target_desc
*tdesc_amd64_linux_no_xml
;
96 static struct target_desc
*tdesc_i386_linux_no_xml
;
99 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
100 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
102 /* Backward compatibility for gdb without XML support. */
104 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
105 <architecture>i386</architecture>\
106 <osabi>GNU/Linux</osabi>\
110 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
111 <architecture>i386:x86-64</architecture>\
112 <osabi>GNU/Linux</osabi>\
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
121 #ifndef PTRACE_GETREGSET
122 #define PTRACE_GETREGSET 0x4204
125 #ifndef PTRACE_SETREGSET
126 #define PTRACE_SETREGSET 0x4205
130 #ifndef PTRACE_GET_THREAD_AREA
131 #define PTRACE_GET_THREAD_AREA 25
134 /* This definition comes from prctl.h, but some kernels may not have it. */
135 #ifndef PTRACE_ARCH_PRCTL
136 #define PTRACE_ARCH_PRCTL 30
139 /* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
142 #define ARCH_SET_GS 0x1001
143 #define ARCH_SET_FS 0x1002
144 #define ARCH_GET_FS 0x1003
145 #define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct i386_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
181 static const int x86_64_regmap
[] =
183 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
184 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
185 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
186 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
187 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
188 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
208 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
209 #define X86_64_USER_REGS (GS + 1)
211 #else /* ! __x86_64__ */
213 /* Mapping between the general-purpose registers in `struct user'
214 format and GDB's register array layout. */
215 static /*const*/ int i386_regmap
[] =
217 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
218 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
219 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
220 DS
* 4, ES
* 4, FS
* 4, GS
* 4
223 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
/* NOTE(review): this file appears to be a lossily-extracted fragment of
   gdbserver's linux-x86-low.c (GDB); return types, braces and
   preprocessor conditionals are missing from the visible text.  Only
   comments are added below; code tokens are untouched.  */
229 /* Returns true if the current inferior belongs to a x86-64 process,
233 is_64bit_tdesc (void)
/* Register 0 being 8 bytes wide identifies a 64-bit target
   description for the current thread.  */
235 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
237 return register_size (regcache
->tdesc
, 0) == 8;
243 /* Called by libthread_db. */
/* Fetch the thread-local-storage base address for LWPID into *BASE.
   On a 64-bit inferior the FS/GS segment bases are read with
   PTRACE_ARCH_PRCTL; on a 32-bit inferior PTRACE_GET_THREAD_AREA is
   used with descriptor index IDX.  (Surrounding control flow was lost
   in extraction -- TODO confirm against the original file.)  */
246 ps_get_thread_area (const struct ps_prochandle
*ph
,
247 lwpid_t lwpid
, int idx
, void **base
)
250 int use_64bit
= is_64bit_tdesc ();
/* 64-bit path: the kernel writes the segment base directly
   through BASE.  */
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
261 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
/* 32-bit path: read the GDT entry for IDX; desc[1] holds the
   segment base address.  */
272 unsigned int desc
[4];
274 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
275 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
278 /* Ensure we properly extend the value to 64-bits for x86_64. */
279 *base
= (void *) (uintptr_t) desc
[1];
284 /* Get the thread area address. This is used to recognize which
285 thread is which when tracing with the in-process agent library. We
286 don't read anything from the address, and treat it as opaque; it's
287 the address itself that we assume is unique per-thread. */
290 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
293 int use_64bit
= is_64bit_tdesc ();
298 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
300 *addr
= (CORE_ADDR
) (uintptr_t) base
;
309 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
310 struct thread_info
*thr
= get_lwp_thread (lwp
);
311 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
312 unsigned int desc
[4];
314 const int reg_thread_area
= 3; /* bits to scale down register value. */
317 collect_register_by_name (regcache
, "gs", &gs
);
319 idx
= gs
>> reg_thread_area
;
321 if (ptrace (PTRACE_GET_THREAD_AREA
,
323 (void *) (long) idx
, (unsigned long) &desc
) < 0)
/* Return non-zero if register REGNO cannot be written through the
   usrregs interface.  On a 64-bit tdesc the visible code takes a
   different (elided) path; for 32-bit inferiors only the first
   I386_NUM_REGS registers are accessible.  (Elided branch lost in
   extraction -- TODO confirm.)  */
334 x86_cannot_store_register (int regno
)
337 if (is_64bit_tdesc ())
341 return regno
>= I386_NUM_REGS
;
/* Mirror of the above for register reads.  */
345 x86_cannot_fetch_register (int regno
)
348 if (is_64bit_tdesc ())
352 return regno
>= I386_NUM_REGS
;
/* Copy the general-purpose registers from REGCACHE into the
   ptrace-layout buffer BUF.  A 64-bit layout (register 0 is 8 bytes)
   uses x86_64_regmap; otherwise the 32-bit i386_regmap offsets are
   used, plus orig_eax.  */
356 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
361 if (register_size (regcache
->tdesc
, 0) == 8)
/* Entries of -1 in x86_64_regmap mark registers not present in
   the ptrace gregset; skip them.  */
363 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
364 if (x86_64_regmap
[i
] != -1)
365 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
369 /* 32-bit inferior registers need to be zero-extended.
370 Callers would read uninitialized memory otherwise. */
371 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
374 for (i
= 0; i
< I386_NUM_REGS
; i
++)
375 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
/* orig_eax sits just past the named registers in `struct user'.  */
377 collect_register_by_name (regcache
, "orig_eax",
378 ((char *) buf
) + ORIG_EAX
* 4);
/* Inverse of x86_fill_gregset: copy the general-purpose registers
   from the ptrace-layout buffer BUF into REGCACHE, using the regmap
   that matches the cache's register width.  */
382 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
387 if (register_size (regcache
->tdesc
, 0) == 8)
389 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
/* -1 entries are registers absent from the gregset.  */
390 if (x86_64_regmap
[i
] != -1)
391 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
396 for (i
= 0; i
< I386_NUM_REGS
; i
++)
397 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
399 supply_register_by_name (regcache
, "orig_eax",
400 ((char *) buf
) + ORIG_EAX
* 4);
404 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
407 i387_cache_to_fxsave (regcache
, buf
);
409 i387_cache_to_fsave (regcache
, buf
);
414 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
417 i387_fxsave_to_cache (regcache
, buf
);
419 i387_fsave_to_cache (regcache
, buf
);
426 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
428 i387_cache_to_fxsave (regcache
, buf
);
432 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
434 i387_fxsave_to_cache (regcache
, buf
);
440 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
442 i387_cache_to_xsave (regcache
, buf
);
446 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
448 i387_xsave_to_cache (regcache
, buf
);
451 /* ??? The non-biarch i386 case stores all the i387 regs twice.
452 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
453 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
454 doesn't work. IWBN to avoid the duplication in the case where it
455 does work. Maybe the arch_setup routine could check whether it works
456 and update the supported regsets accordingly. */
458 static struct regset_info x86_regsets
[] =
460 #ifdef HAVE_PTRACE_GETREGS
461 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
463 x86_fill_gregset
, x86_store_gregset
},
464 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
465 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
467 # ifdef HAVE_PTRACE_GETFPXREGS
468 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
470 x86_fill_fpxregset
, x86_store_fpxregset
},
473 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
475 x86_fill_fpregset
, x86_store_fpregset
},
476 #endif /* HAVE_PTRACE_GETREGS */
477 { 0, 0, 0, -1, -1, NULL
, NULL
}
/* Read the program counter from REGCACHE, choosing "rip" or "eip"
   by the cache's register width.  (The declarations of `pc' and the
   branch structure were lost in extraction.)  */
481 x86_get_pc (struct regcache
*regcache
)
483 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
488 collect_register_by_name (regcache
, "rip", &pc
);
489 return (CORE_ADDR
) pc
;
494 collect_register_by_name (regcache
, "eip", &pc
);
495 return (CORE_ADDR
) pc
;
/* Write PC into REGCACHE as "rip" (64-bit cache) or "eip" (32-bit).
   Note the local is deliberately sized to the target word width before
   being supplied.  */
500 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
502 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
506 unsigned long newpc
= pc
;
507 supply_register_by_name (regcache
, "rip", &newpc
);
/* 32-bit path: truncate to 32 bits for "eip".  */
511 unsigned int newpc
= pc
;
512 supply_register_by_name (regcache
, "eip", &newpc
);
516 static const unsigned char x86_breakpoint
[] = { 0xCC };
517 #define x86_breakpoint_len 1
520 x86_breakpoint_at (CORE_ADDR pc
)
524 (*the_target
->read_memory
) (pc
, &c
, 1);
531 /* Support for debug registers. */
/* Read debug register REGNUM of the LWP identified by PTID via
   PTRACE_PEEKUSER at its offset in `struct user'; errors out if the
   ptrace call fails.  (errno reset/check lines elided by extraction --
   TODO confirm.)  */
534 x86_linux_dr_get (ptid_t ptid
, int regnum
)
539 tid
= ptid_get_lwp (ptid
);
542 value
= ptrace (PTRACE_PEEKUSER
, tid
,
543 offsetof (struct user
, u_debugreg
[regnum
]), 0);
545 error ("Couldn't read debug register");
/* Write VALUE to debug register REGNUM of the LWP identified by PTID
   via PTRACE_POKEUSER; errors out on failure.  */
551 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
555 tid
= ptid_get_lwp (ptid
);
558 ptrace (PTRACE_POKEUSER
, tid
,
559 offsetof (struct user
, u_debugreg
[regnum
]), value
);
561 error ("Couldn't write debug register");
/* find_inferior callback: for every thread of the process whose pid is
   *PID_P, flag its debug registers as stale; the refresh itself happens
   lazily in x86_linux_prepare_to_resume.  */
565 update_debug_registers_callback (struct inferior_list_entry
*entry
,
568 struct thread_info
*thr
= (struct thread_info
*) entry
;
569 struct lwp_info
*lwp
= get_thread_lwp (thr
);
570 int pid
= *(int *) pid_p
;
572 /* Only update the threads of this process. */
573 if (pid_of (thr
) == pid
)
575 /* The actual update is done later just before resuming the lwp,
576 we just mark that the registers need updating. */
577 lwp
->arch_private
->debug_registers_changed
= 1;
579 /* If the lwp isn't stopped, force it to momentarily pause, so
580 we can update its debug registers. */
582 linux_stop_lwp (lwp
);
588 /* Update the inferior's debug register REGNUM from STATE. */
591 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
593 /* Only update the threads of this process. */
594 int pid
= pid_of (current_inferior
);
596 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
597 fatal ("Invalid debug register %d", regnum
);
599 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
602 /* Return the inferior's debug register REGNUM. */
605 i386_dr_low_get_addr (int regnum
)
607 ptid_t ptid
= ptid_of (current_inferior
);
609 /* DR6 and DR7 are retrieved with some other way. */
610 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
612 return x86_linux_dr_get (ptid
, regnum
);
615 /* Update the inferior's DR7 debug control register from STATE. */
618 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
620 /* Only update the threads of this process. */
621 int pid
= pid_of (current_inferior
);
623 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
626 /* Return the inferior's DR7 debug control register. */
629 i386_dr_low_get_control (void)
631 ptid_t ptid
= ptid_of (current_inferior
);
633 return x86_linux_dr_get (ptid
, DR_CONTROL
);
636 /* Get the value of the DR6 debug status register from the inferior
637 and record it in STATE. */
640 i386_dr_low_get_status (void)
642 ptid_t ptid
= ptid_of (current_inferior
);
644 return x86_linux_dr_get (ptid
, DR_STATUS
);
647 /* Breakpoint/Watchpoint support. */
650 x86_supports_z_point_type (char z_type
)
656 case Z_PACKET_WRITE_WP
:
657 case Z_PACKET_ACCESS_WP
:
665 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
666 int size
, struct raw_breakpoint
*bp
)
668 struct process_info
*proc
= current_process ();
672 case raw_bkpt_type_sw
:
673 return insert_memory_breakpoint (bp
);
675 case raw_bkpt_type_hw
:
676 case raw_bkpt_type_write_wp
:
677 case raw_bkpt_type_access_wp
:
679 enum target_hw_bp_type hw_type
680 = raw_bkpt_type_to_target_hw_bp_type (type
);
681 struct i386_debug_reg_state
*state
682 = &proc
->private->arch_private
->debug_reg_state
;
684 return i386_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
694 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
695 int size
, struct raw_breakpoint
*bp
)
697 struct process_info
*proc
= current_process ();
701 case raw_bkpt_type_sw
:
702 return remove_memory_breakpoint (bp
);
704 case raw_bkpt_type_hw
:
705 case raw_bkpt_type_write_wp
:
706 case raw_bkpt_type_access_wp
:
708 enum target_hw_bp_type hw_type
709 = raw_bkpt_type_to_target_hw_bp_type (type
);
710 struct i386_debug_reg_state
*state
711 = &proc
->private->arch_private
->debug_reg_state
;
713 return i386_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
722 x86_stopped_by_watchpoint (void)
724 struct process_info
*proc
= current_process ();
725 return i386_dr_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
729 x86_stopped_data_address (void)
731 struct process_info
*proc
= current_process ();
733 if (i386_dr_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
739 /* Called when a new process is created. */
/* Allocate and zero the per-process arch data and initialize its
   debug-register mirror.  Ownership passes to the caller.  */
741 static struct arch_process_info
*
742 x86_linux_new_process (void)
744 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
746 i386_low_init_dregs (&info
->debug_reg_state
);
751 /* Called when a new thread is detected. */
/* Allocate per-thread arch data; mark debug registers as needing a
   write-out so the new thread picks up the process's watchpoints on
   its first resume.  */
753 static struct arch_lwp_info
*
754 x86_linux_new_thread (void)
756 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
758 info
->debug_registers_changed
= 1;
763 /* Called when resuming a thread.
764 If the debug regs have changed, update the thread's copies. */
767 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
769 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
))
;
770 int clear_status
= 0;
/* Write out the process-wide debug-register mirror only when this
   LWP was flagged stale (see update_debug_registers_callback).  */
772 if (lwp
->arch_private
->debug_registers_changed
)
775 int pid
= ptid_get_pid (ptid
);
776 struct process_info
*proc
= find_process_pid (pid
);
777 struct i386_debug_reg_state
*state
778 = &proc
->private->arch_private
->debug_reg_state
;
/* Only install address registers that are actually in use.  */
780 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
781 if (state
->dr_ref_count
[i
] > 0)
783 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
785 /* If we're setting a watchpoint, any change the inferior
786 had done itself to the debug registers needs to be
787 discarded, otherwise, i386_dr_stopped_data_address can
792 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
794 lwp
->arch_private
->debug_registers_changed
= 0;
/* Clear DR_STATUS so a stale watchpoint hit isn't re-reported.  */
797 if (clear_status
|| lwp
->stopped_by_watchpoint
)
798 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
801 /* When GDBSERVER is built as a 64-bit application on linux, the
802 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
803 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
804 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
805 conversion in-place ourselves. */
807 /* These types below (compat_*) define a siginfo type that is layout
808 compatible with the siginfo type exported by the 32-bit userspace
813 typedef int compat_int_t
;
814 typedef unsigned int compat_uptr_t
;
816 typedef int compat_time_t
;
817 typedef int compat_timer_t
;
818 typedef int compat_clock_t
;
820 struct compat_timeval
822 compat_time_t tv_sec
;
826 typedef union compat_sigval
828 compat_int_t sival_int
;
829 compat_uptr_t sival_ptr
;
832 typedef struct compat_siginfo
840 int _pad
[((128 / sizeof (int)) - 3)];
849 /* POSIX.1b timers */
854 compat_sigval_t _sigval
;
857 /* POSIX.1b signals */
862 compat_sigval_t _sigval
;
871 compat_clock_t _utime
;
872 compat_clock_t _stime
;
875 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
890 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
891 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
893 typedef struct compat_x32_siginfo
901 int _pad
[((128 / sizeof (int)) - 3)];
910 /* POSIX.1b timers */
915 compat_sigval_t _sigval
;
918 /* POSIX.1b signals */
923 compat_sigval_t _sigval
;
932 compat_x32_clock_t _utime
;
933 compat_x32_clock_t _stime
;
936 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
949 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
951 #define cpt_si_pid _sifields._kill._pid
952 #define cpt_si_uid _sifields._kill._uid
953 #define cpt_si_timerid _sifields._timer._tid
954 #define cpt_si_overrun _sifields._timer._overrun
955 #define cpt_si_status _sifields._sigchld._status
956 #define cpt_si_utime _sifields._sigchld._utime
957 #define cpt_si_stime _sifields._sigchld._stime
958 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
959 #define cpt_si_addr _sifields._sigfault._addr
960 #define cpt_si_band _sifields._sigpoll._band
961 #define cpt_si_fd _sifields._sigpoll._fd
963 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
964 In their place is si_timer1,si_timer2. */
966 #define si_timerid si_timer1
969 #define si_overrun si_timer2
973 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
975 memset (to
, 0, sizeof (*to
));
977 to
->si_signo
= from
->si_signo
;
978 to
->si_errno
= from
->si_errno
;
979 to
->si_code
= from
->si_code
;
981 if (to
->si_code
== SI_TIMER
)
983 to
->cpt_si_timerid
= from
->si_timerid
;
984 to
->cpt_si_overrun
= from
->si_overrun
;
985 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
987 else if (to
->si_code
== SI_USER
)
989 to
->cpt_si_pid
= from
->si_pid
;
990 to
->cpt_si_uid
= from
->si_uid
;
992 else if (to
->si_code
< 0)
994 to
->cpt_si_pid
= from
->si_pid
;
995 to
->cpt_si_uid
= from
->si_uid
;
996 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1000 switch (to
->si_signo
)
1003 to
->cpt_si_pid
= from
->si_pid
;
1004 to
->cpt_si_uid
= from
->si_uid
;
1005 to
->cpt_si_status
= from
->si_status
;
1006 to
->cpt_si_utime
= from
->si_utime
;
1007 to
->cpt_si_stime
= from
->si_stime
;
1013 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1016 to
->cpt_si_band
= from
->si_band
;
1017 to
->cpt_si_fd
= from
->si_fd
;
1020 to
->cpt_si_pid
= from
->si_pid
;
1021 to
->cpt_si_uid
= from
->si_uid
;
1022 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1029 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1031 memset (to
, 0, sizeof (*to
));
1033 to
->si_signo
= from
->si_signo
;
1034 to
->si_errno
= from
->si_errno
;
1035 to
->si_code
= from
->si_code
;
1037 if (to
->si_code
== SI_TIMER
)
1039 to
->si_timerid
= from
->cpt_si_timerid
;
1040 to
->si_overrun
= from
->cpt_si_overrun
;
1041 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1043 else if (to
->si_code
== SI_USER
)
1045 to
->si_pid
= from
->cpt_si_pid
;
1046 to
->si_uid
= from
->cpt_si_uid
;
1048 else if (to
->si_code
< 0)
1050 to
->si_pid
= from
->cpt_si_pid
;
1051 to
->si_uid
= from
->cpt_si_uid
;
1052 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1056 switch (to
->si_signo
)
1059 to
->si_pid
= from
->cpt_si_pid
;
1060 to
->si_uid
= from
->cpt_si_uid
;
1061 to
->si_status
= from
->cpt_si_status
;
1062 to
->si_utime
= from
->cpt_si_utime
;
1063 to
->si_stime
= from
->cpt_si_stime
;
1069 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1072 to
->si_band
= from
->cpt_si_band
;
1073 to
->si_fd
= from
->cpt_si_fd
;
1076 to
->si_pid
= from
->cpt_si_pid
;
1077 to
->si_uid
= from
->cpt_si_uid
;
1078 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1085 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1088 memset (to
, 0, sizeof (*to
));
1090 to
->si_signo
= from
->si_signo
;
1091 to
->si_errno
= from
->si_errno
;
1092 to
->si_code
= from
->si_code
;
1094 if (to
->si_code
== SI_TIMER
)
1096 to
->cpt_si_timerid
= from
->si_timerid
;
1097 to
->cpt_si_overrun
= from
->si_overrun
;
1098 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1100 else if (to
->si_code
== SI_USER
)
1102 to
->cpt_si_pid
= from
->si_pid
;
1103 to
->cpt_si_uid
= from
->si_uid
;
1105 else if (to
->si_code
< 0)
1107 to
->cpt_si_pid
= from
->si_pid
;
1108 to
->cpt_si_uid
= from
->si_uid
;
1109 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1113 switch (to
->si_signo
)
1116 to
->cpt_si_pid
= from
->si_pid
;
1117 to
->cpt_si_uid
= from
->si_uid
;
1118 to
->cpt_si_status
= from
->si_status
;
1119 to
->cpt_si_utime
= from
->si_utime
;
1120 to
->cpt_si_stime
= from
->si_stime
;
1126 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1129 to
->cpt_si_band
= from
->si_band
;
1130 to
->cpt_si_fd
= from
->si_fd
;
1133 to
->cpt_si_pid
= from
->si_pid
;
1134 to
->cpt_si_uid
= from
->si_uid
;
1135 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1142 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1143 compat_x32_siginfo_t
*from
)
1145 memset (to
, 0, sizeof (*to
));
1147 to
->si_signo
= from
->si_signo
;
1148 to
->si_errno
= from
->si_errno
;
1149 to
->si_code
= from
->si_code
;
1151 if (to
->si_code
== SI_TIMER
)
1153 to
->si_timerid
= from
->cpt_si_timerid
;
1154 to
->si_overrun
= from
->cpt_si_overrun
;
1155 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1157 else if (to
->si_code
== SI_USER
)
1159 to
->si_pid
= from
->cpt_si_pid
;
1160 to
->si_uid
= from
->cpt_si_uid
;
1162 else if (to
->si_code
< 0)
1164 to
->si_pid
= from
->cpt_si_pid
;
1165 to
->si_uid
= from
->cpt_si_uid
;
1166 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1170 switch (to
->si_signo
)
1173 to
->si_pid
= from
->cpt_si_pid
;
1174 to
->si_uid
= from
->cpt_si_uid
;
1175 to
->si_status
= from
->cpt_si_status
;
1176 to
->si_utime
= from
->cpt_si_utime
;
1177 to
->si_stime
= from
->cpt_si_stime
;
1183 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1186 to
->si_band
= from
->cpt_si_band
;
1187 to
->si_fd
= from
->cpt_si_fd
;
1190 to
->si_pid
= from
->cpt_si_pid
;
1191 to
->si_uid
= from
->cpt_si_uid
;
1192 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1198 #endif /* __x86_64__ */
1200 /* Convert a native/host siginfo object, into/from the siginfo in the
1201 layout of the inferiors' architecture. Returns true if any
1202 conversion was done; false otherwise. If DIRECTION is 1, then copy
1203 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1207 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1210 unsigned int machine
;
1211 int tid
= lwpid_of (current_inferior
);
1212 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1214 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1215 if (!is_64bit_tdesc ())
/* Sanity: the compat layout must occupy the same space as the
   host siginfo, since the conversion is done in place.  */
1217 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1218 fatal ("unexpected difference in siginfo");
/* DIRECTION selects which way the in-place conversion runs
   (the `if (direction ...)' line was lost in extraction).  */
1221 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1223 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1227 /* No fixup for native x32 GDB. */
1228 else if (!is_elf64
&& sizeof (void *) == 8)
1230 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1231 fatal ("unexpected difference in siginfo");
1234 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1237 siginfo_from_compat_x32_siginfo (native
,
1238 (struct compat_x32_siginfo
*) inf
);
1249 /* Format of XSAVE extended state is:
1252 fxsave_bytes[0..463]
1253 sw_usable_bytes[464..511]
1254 xstate_hdr_bytes[512..575]
1259 Same memory layout will be used for the coredump NT_X86_XSTATE
1260 representing the XSAVE extended state registers.
1262 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1263 extended state mask, which is the same as the extended control register
1264 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1265 together with the mask saved in the xstate_hdr_bytes to determine what
1266 states the processor/OS supports and what state, used or initialized,
1267 the process/thread is in. */
1268 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1270 /* Does the current host support the GETFPXREGS request? The header
1271 file may or may not define it, and even if it is defined, the
1272 kernel will return EIO if it's running on a pre-SSE processor. */
1273 int have_ptrace_getfpxregs
=
1274 #ifdef HAVE_PTRACE_GETFPXREGS
1281 /* Does the current host support PTRACE_GETREGSET? */
1282 static int have_ptrace_getregset
= -1;
1284 /* Get Linux/x86 target description from running target. */
1286 static const struct target_desc
*
1287 x86_linux_read_description (void)
1289 unsigned int machine
;
1293 static uint64_t xcr0
;
1294 struct regset_info
*regset
;
1296 tid
= lwpid_of (current_inferior
);
1298 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1300 if (sizeof (void *) == 4)
1303 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1305 else if (machine
== EM_X86_64
)
1306 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1310 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1311 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1313 elf_fpxregset_t fpxregs
;
1315 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1317 have_ptrace_getfpxregs
= 0;
1318 have_ptrace_getregset
= 0;
1319 return tdesc_i386_mmx_linux
;
1322 have_ptrace_getfpxregs
= 1;
1328 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1330 /* Don't use XML. */
1332 if (machine
== EM_X86_64
)
1333 return tdesc_amd64_linux_no_xml
;
1336 return tdesc_i386_linux_no_xml
;
1339 if (have_ptrace_getregset
== -1)
1341 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1344 iov
.iov_base
= xstateregs
;
1345 iov
.iov_len
= sizeof (xstateregs
);
1347 /* Check if PTRACE_GETREGSET works. */
1348 if (ptrace (PTRACE_GETREGSET
, tid
,
1349 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1350 have_ptrace_getregset
= 0;
1353 have_ptrace_getregset
= 1;
1355 /* Get XCR0 from XSAVE extended state. */
1356 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1357 / sizeof (uint64_t))];
1359 /* Use PTRACE_GETREGSET if it is available. */
1360 for (regset
= x86_regsets
;
1361 regset
->fill_function
!= NULL
; regset
++)
1362 if (regset
->get_request
== PTRACE_GETREGSET
)
1363 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1364 else if (regset
->type
!= GENERAL_REGS
)
1369 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1370 xcr0_features
= (have_ptrace_getregset
1371 && (xcr0
& I386_XSTATE_ALL_MASK
));
1376 if (machine
== EM_X86_64
)
1383 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1385 case I386_XSTATE_AVX512_MASK
:
1386 return tdesc_amd64_avx512_linux
;
1388 case I386_XSTATE_MPX_MASK
:
1389 return tdesc_amd64_mpx_linux
;
1391 case I386_XSTATE_AVX_MASK
:
1392 return tdesc_amd64_avx_linux
;
1395 return tdesc_amd64_linux
;
1399 return tdesc_amd64_linux
;
1405 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1407 case I386_XSTATE_AVX512_MASK
:
1408 return tdesc_x32_avx512_linux
;
1410 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1411 case I386_XSTATE_AVX_MASK
:
1412 return tdesc_x32_avx_linux
;
1415 return tdesc_x32_linux
;
1419 return tdesc_x32_linux
;
1427 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1429 case (I386_XSTATE_AVX512_MASK
):
1430 return tdesc_i386_avx512_linux
;
1432 case (I386_XSTATE_MPX_MASK
):
1433 return tdesc_i386_mpx_linux
;
1435 case (I386_XSTATE_AVX_MASK
):
1436 return tdesc_i386_avx_linux
;
1439 return tdesc_i386_linux
;
1443 return tdesc_i386_linux
;
1446 gdb_assert_not_reached ("failed to return tdesc");
1449 /* Callback for find_inferior. Stops iteration when a thread with a
1450 given PID is found. */
1453 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1455 int pid
= *(int *) data
;
1457 return (ptid_get_pid (entry
->id
) == pid
);
1460 /* Callback for for_each_inferior. Calls the arch_setup routine for
1464 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1466 int pid
= ptid_get_pid (entry
->id
);
1468 /* Look up any thread of this processes. */
1470 = (struct thread_info
*) find_inferior (&all_threads
,
1471 same_process_callback
, &pid
);
1473 the_low_target
.arch_setup ();
1476 /* Update all the target description of all processes; a new GDB
1477 connected, and it may or not support xml target descriptions. */
1480 x86_linux_update_xmltarget (void)
1482 struct thread_info
*save_inferior
= current_inferior
;
1484 /* Before changing the register cache's internal layout, flush the
1485 contents of the current valid caches back to the threads, and
1486 release the current regcache objects. */
1487 regcache_release ();
1489 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1491 current_inferior
= save_inferior
;
1494 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1495 PTRACE_GETREGSET. */
1498 x86_linux_process_qsupported (const char *query
)
1500 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1501 with "i386" in qSupported query, it supports x86 XML target
1504 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
/* Work on a writable copy: strtok mutates its input.  The copy is
   presumably freed after the loop (line lost in extraction --
   TODO confirm).  */
1506 char *copy
= xstrdup (query
+ 13);
1509 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1511 if (strcmp (p
, "i386") == 0)
/* Re-run arch setup for every process so the chosen target
   description matches GDB's XML capability.  */
1521 x86_linux_update_xmltarget ();
1524 /* Common for x86/x86-64. */
1526 static struct regsets_info x86_regsets_info
=
1528 x86_regsets
, /* regsets */
1529 0, /* num_regsets */
1530 NULL
, /* disabled_regsets */
1534 static struct regs_info amd64_linux_regs_info
=
1536 NULL
, /* regset_bitmap */
1537 NULL
, /* usrregs_info */
1541 static struct usrregs_info i386_linux_usrregs_info
=
1547 static struct regs_info i386_linux_regs_info
=
1549 NULL
, /* regset_bitmap */
1550 &i386_linux_usrregs_info
,
1554 const struct regs_info
*
1555 x86_linux_regs_info (void)
1558 if (is_64bit_tdesc ())
1559 return &amd64_linux_regs_info
;
1562 return &i386_linux_regs_info
;
1565 /* Initialize the target description for the architecture of the
1569 x86_arch_setup (void)
1571 current_process ()->tdesc
= x86_linux_read_description ();
/* Fast tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1581 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1583 write_inferior_memory (*to
, buf
, len
);
/* Decode the space-separated hex byte string OP (e.g. "48 83 ec 18")
   into BUF.  Returns the number of bytes written.  OP is now const —
   callers pass string literals, and binding those to a plain
   'char *' is a const-correctness defect.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No digits consumed: end of the opcode string.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1609 /* Build a jump pad that saves registers and calls a collection
1610 function. Writes a jump instruction to the jump pad to
1611 JJUMPAD_INSN. The caller is responsible to write it in at the
1612 tracepoint address. */
1615 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1616 CORE_ADDR collector
,
1619 CORE_ADDR
*jump_entry
,
1620 CORE_ADDR
*trampoline
,
1621 ULONGEST
*trampoline_size
,
1622 unsigned char *jjump_pad_insn
,
1623 ULONGEST
*jjump_pad_insn_size
,
1624 CORE_ADDR
*adjusted_insn_addr
,
1625 CORE_ADDR
*adjusted_insn_addr_end
,
1628 unsigned char buf
[40];
1632 CORE_ADDR buildaddr
= *jump_entry
;
1634 /* Build the jump pad. */
1636 /* First, do tracepoint data collection. Save registers. */
1638 /* Need to ensure stack pointer saved first. */
1639 buf
[i
++] = 0x54; /* push %rsp */
1640 buf
[i
++] = 0x55; /* push %rbp */
1641 buf
[i
++] = 0x57; /* push %rdi */
1642 buf
[i
++] = 0x56; /* push %rsi */
1643 buf
[i
++] = 0x52; /* push %rdx */
1644 buf
[i
++] = 0x51; /* push %rcx */
1645 buf
[i
++] = 0x53; /* push %rbx */
1646 buf
[i
++] = 0x50; /* push %rax */
1647 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1648 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1649 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1650 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1651 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1652 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1653 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1654 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1655 buf
[i
++] = 0x9c; /* pushfq */
1656 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1658 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1659 i
+= sizeof (unsigned long);
1660 buf
[i
++] = 0x57; /* push %rdi */
1661 append_insns (&buildaddr
, i
, buf
);
1663 /* Stack space for the collecting_t object. */
1665 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1666 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1667 memcpy (buf
+ i
, &tpoint
, 8);
1669 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1670 i
+= push_opcode (&buf
[i
],
1671 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1672 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1673 append_insns (&buildaddr
, i
, buf
);
1677 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1678 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1680 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1681 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1682 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1683 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1684 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1685 append_insns (&buildaddr
, i
, buf
);
1687 /* Set up the gdb_collect call. */
1688 /* At this point, (stack pointer + 0x18) is the base of our saved
1692 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1693 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1695 /* tpoint address may be 64-bit wide. */
1696 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1697 memcpy (buf
+ i
, &tpoint
, 8);
1699 append_insns (&buildaddr
, i
, buf
);
1701 /* The collector function being in the shared library, may be
1702 >31-bits away off the jump pad. */
1704 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1705 memcpy (buf
+ i
, &collector
, 8);
1707 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1708 append_insns (&buildaddr
, i
, buf
);
1710 /* Clear the spin-lock. */
1712 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1713 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1714 memcpy (buf
+ i
, &lockaddr
, 8);
1716 append_insns (&buildaddr
, i
, buf
);
1718 /* Remove stack that had been used for the collect_t object. */
1720 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1721 append_insns (&buildaddr
, i
, buf
);
1723 /* Restore register state. */
1725 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1729 buf
[i
++] = 0x9d; /* popfq */
1730 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1731 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1732 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1733 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1734 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1735 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1736 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1737 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1738 buf
[i
++] = 0x58; /* pop %rax */
1739 buf
[i
++] = 0x5b; /* pop %rbx */
1740 buf
[i
++] = 0x59; /* pop %rcx */
1741 buf
[i
++] = 0x5a; /* pop %rdx */
1742 buf
[i
++] = 0x5e; /* pop %rsi */
1743 buf
[i
++] = 0x5f; /* pop %rdi */
1744 buf
[i
++] = 0x5d; /* pop %rbp */
1745 buf
[i
++] = 0x5c; /* pop %rsp */
1746 append_insns (&buildaddr
, i
, buf
);
1748 /* Now, adjust the original instruction to execute in the jump
1750 *adjusted_insn_addr
= buildaddr
;
1751 relocate_instruction (&buildaddr
, tpaddr
);
1752 *adjusted_insn_addr_end
= buildaddr
;
1754 /* Finally, write a jump back to the program. */
1756 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1757 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1760 "E.Jump back from jump pad too far from tracepoint "
1761 "(offset 0x%" PRIx64
" > int32).", loffset
);
1765 offset
= (int) loffset
;
1766 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1767 memcpy (buf
+ 1, &offset
, 4);
1768 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1770 /* The jump pad is now built. Wire in a jump to our jump pad. This
1771 is always done last (by our caller actually), so that we can
1772 install fast tracepoints with threads running. This relies on
1773 the agent's atomic write support. */
1774 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1775 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1778 "E.Jump pad too far from tracepoint "
1779 "(offset 0x%" PRIx64
" > int32).", loffset
);
1783 offset
= (int) loffset
;
1785 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1786 memcpy (buf
+ 1, &offset
, 4);
1787 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1788 *jjump_pad_insn_size
= sizeof (jump_insn
);
1790 /* Return the end address of our pad. */
1791 *jump_entry
= buildaddr
;
1796 #endif /* __x86_64__ */
1798 /* Build a jump pad that saves registers and calls a collection
1799 function. Writes a jump instruction to the jump pad to
1800 JJUMPAD_INSN. The caller is responsible to write it in at the
1801 tracepoint address. */
1804 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1805 CORE_ADDR collector
,
1808 CORE_ADDR
*jump_entry
,
1809 CORE_ADDR
*trampoline
,
1810 ULONGEST
*trampoline_size
,
1811 unsigned char *jjump_pad_insn
,
1812 ULONGEST
*jjump_pad_insn_size
,
1813 CORE_ADDR
*adjusted_insn_addr
,
1814 CORE_ADDR
*adjusted_insn_addr_end
,
1817 unsigned char buf
[0x100];
1819 CORE_ADDR buildaddr
= *jump_entry
;
1821 /* Build the jump pad. */
1823 /* First, do tracepoint data collection. Save registers. */
1825 buf
[i
++] = 0x60; /* pushad */
1826 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1827 *((int *)(buf
+ i
)) = (int) tpaddr
;
1829 buf
[i
++] = 0x9c; /* pushf */
1830 buf
[i
++] = 0x1e; /* push %ds */
1831 buf
[i
++] = 0x06; /* push %es */
1832 buf
[i
++] = 0x0f; /* push %fs */
1834 buf
[i
++] = 0x0f; /* push %gs */
1836 buf
[i
++] = 0x16; /* push %ss */
1837 buf
[i
++] = 0x0e; /* push %cs */
1838 append_insns (&buildaddr
, i
, buf
);
1840 /* Stack space for the collecting_t object. */
1842 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1844 /* Build the object. */
1845 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1846 memcpy (buf
+ i
, &tpoint
, 4);
1848 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1850 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1851 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1852 append_insns (&buildaddr
, i
, buf
);
1854 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1855 If we cared for it, this could be using xchg alternatively. */
1858 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1859 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1861 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1863 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1864 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1865 append_insns (&buildaddr
, i
, buf
);
1868 /* Set up arguments to the gdb_collect call. */
1870 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1871 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1872 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1873 append_insns (&buildaddr
, i
, buf
);
1876 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1877 append_insns (&buildaddr
, i
, buf
);
1880 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1881 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1883 append_insns (&buildaddr
, i
, buf
);
1885 buf
[0] = 0xe8; /* call <reladdr> */
1886 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1887 memcpy (buf
+ 1, &offset
, 4);
1888 append_insns (&buildaddr
, 5, buf
);
1889 /* Clean up after the call. */
1890 buf
[0] = 0x83; /* add $0x8,%esp */
1893 append_insns (&buildaddr
, 3, buf
);
1896 /* Clear the spin-lock. This would need the LOCK prefix on older
1899 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1900 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1901 memcpy (buf
+ i
, &lockaddr
, 4);
1903 append_insns (&buildaddr
, i
, buf
);
1906 /* Remove stack that had been used for the collect_t object. */
1908 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1909 append_insns (&buildaddr
, i
, buf
);
1912 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1915 buf
[i
++] = 0x17; /* pop %ss */
1916 buf
[i
++] = 0x0f; /* pop %gs */
1918 buf
[i
++] = 0x0f; /* pop %fs */
1920 buf
[i
++] = 0x07; /* pop %es */
1921 buf
[i
++] = 0x1f; /* pop %ds */
1922 buf
[i
++] = 0x9d; /* popf */
1923 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1926 buf
[i
++] = 0x61; /* popad */
1927 append_insns (&buildaddr
, i
, buf
);
1929 /* Now, adjust the original instruction to execute in the jump
1931 *adjusted_insn_addr
= buildaddr
;
1932 relocate_instruction (&buildaddr
, tpaddr
);
1933 *adjusted_insn_addr_end
= buildaddr
;
1935 /* Write the jump back to the program. */
1936 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1937 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1938 memcpy (buf
+ 1, &offset
, 4);
1939 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1941 /* The jump pad is now built. Wire in a jump to our jump pad. This
1942 is always done last (by our caller actually), so that we can
1943 install fast tracepoints with threads running. This relies on
1944 the agent's atomic write support. */
1947 /* Create a trampoline. */
1948 *trampoline_size
= sizeof (jump_insn
);
1949 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1951 /* No trampoline space available. */
1953 "E.Cannot allocate trampoline space needed for fast "
1954 "tracepoints on 4-byte instructions.");
1958 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1959 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1960 memcpy (buf
+ 1, &offset
, 4);
1961 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1963 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1964 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1965 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1966 memcpy (buf
+ 2, &offset
, 2);
1967 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1968 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1972 /* Else use a 32-bit relative jump instruction. */
1973 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1974 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1975 memcpy (buf
+ 1, &offset
, 4);
1976 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1977 *jjump_pad_insn_size
= sizeof (jump_insn
);
1980 /* Return the end address of our pad. */
1981 *jump_entry
= buildaddr
;
1987 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1988 CORE_ADDR collector
,
1991 CORE_ADDR
*jump_entry
,
1992 CORE_ADDR
*trampoline
,
1993 ULONGEST
*trampoline_size
,
1994 unsigned char *jjump_pad_insn
,
1995 ULONGEST
*jjump_pad_insn_size
,
1996 CORE_ADDR
*adjusted_insn_addr
,
1997 CORE_ADDR
*adjusted_insn_addr_end
,
2001 if (is_64bit_tdesc ())
2002 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2003 collector
, lockaddr
,
2004 orig_size
, jump_entry
,
2005 trampoline
, trampoline_size
,
2007 jjump_pad_insn_size
,
2009 adjusted_insn_addr_end
,
2013 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2014 collector
, lockaddr
,
2015 orig_size
, jump_entry
,
2016 trampoline
, trampoline_size
,
2018 jjump_pad_insn_size
,
2020 adjusted_insn_addr_end
,
2024 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2028 x86_get_min_fast_tracepoint_insn_len (void)
2030 static int warned_about_fast_tracepoints
= 0;
2033 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2034 used for fast tracepoints. */
2035 if (is_64bit_tdesc ())
2039 if (agent_loaded_p ())
2041 char errbuf
[IPA_BUFSIZ
];
2045 /* On x86, if trampolines are available, then 4-byte jump instructions
2046 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2047 with a 4-byte offset are used instead. */
2048 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2052 /* GDB has no channel to explain to user why a shorter fast
2053 tracepoint is not possible, but at least make GDBserver
2054 mention that something has gone awry. */
2055 if (!warned_about_fast_tracepoints
)
2057 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2058 warned_about_fast_tracepoints
= 1;
2065 /* Indicate that the minimum length is currently unknown since the IPA
2066 has not loaded yet. */
2072 add_insns (unsigned char *start
, int len
)
2074 CORE_ADDR buildaddr
= current_insn_ptr
;
2077 debug_printf ("Adding %d bytes of insn at %s\n",
2078 len
, paddress (buildaddr
));
2080 append_insns (&buildaddr
, len
, start
);
2081 current_insn_ptr
= buildaddr
;
2084 /* Our general strategy for emitting code is to avoid specifying raw
2085 bytes whenever possible, and instead copy a block of inline asm
2086 that is embedded in the function. This is a little messy, because
2087 we need to keep the compiler from discarding what looks like dead
2088 code, plus suppress various warnings. */
2090 #define EMIT_ASM(NAME, INSNS) \
2093 extern unsigned char start_ ## NAME, end_ ## NAME; \
2094 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2095 __asm__ ("jmp end_" #NAME "\n" \
2096 "\t" "start_" #NAME ":" \
2098 "\t" "end_" #NAME ":"); \
2103 #define EMIT_ASM32(NAME,INSNS) \
2106 extern unsigned char start_ ## NAME, end_ ## NAME; \
2107 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2108 __asm__ (".code32\n" \
2109 "\t" "jmp end_" #NAME "\n" \
2110 "\t" "start_" #NAME ":\n" \
2112 "\t" "end_" #NAME ":\n" \
2118 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2125 amd64_emit_prologue (void)
2127 EMIT_ASM (amd64_prologue
,
2129 "movq %rsp,%rbp\n\t"
2130 "sub $0x20,%rsp\n\t"
2131 "movq %rdi,-8(%rbp)\n\t"
2132 "movq %rsi,-16(%rbp)");
2137 amd64_emit_epilogue (void)
2139 EMIT_ASM (amd64_epilogue
,
2140 "movq -16(%rbp),%rdi\n\t"
2141 "movq %rax,(%rdi)\n\t"
2148 amd64_emit_add (void)
2150 EMIT_ASM (amd64_add
,
2151 "add (%rsp),%rax\n\t"
2152 "lea 0x8(%rsp),%rsp");
2156 amd64_emit_sub (void)
2158 EMIT_ASM (amd64_sub
,
2159 "sub %rax,(%rsp)\n\t"
2164 amd64_emit_mul (void)
2170 amd64_emit_lsh (void)
2176 amd64_emit_rsh_signed (void)
2182 amd64_emit_rsh_unsigned (void)
2188 amd64_emit_ext (int arg
)
2193 EMIT_ASM (amd64_ext_8
,
2199 EMIT_ASM (amd64_ext_16
,
2204 EMIT_ASM (amd64_ext_32
,
2213 amd64_emit_log_not (void)
2215 EMIT_ASM (amd64_log_not
,
2216 "test %rax,%rax\n\t"
2222 amd64_emit_bit_and (void)
2224 EMIT_ASM (amd64_and
,
2225 "and (%rsp),%rax\n\t"
2226 "lea 0x8(%rsp),%rsp");
2230 amd64_emit_bit_or (void)
2233 "or (%rsp),%rax\n\t"
2234 "lea 0x8(%rsp),%rsp");
2238 amd64_emit_bit_xor (void)
2240 EMIT_ASM (amd64_xor
,
2241 "xor (%rsp),%rax\n\t"
2242 "lea 0x8(%rsp),%rsp");
2246 amd64_emit_bit_not (void)
2248 EMIT_ASM (amd64_bit_not
,
2249 "xorq $0xffffffffffffffff,%rax");
2253 amd64_emit_equal (void)
2255 EMIT_ASM (amd64_equal
,
2256 "cmp %rax,(%rsp)\n\t"
2257 "je .Lamd64_equal_true\n\t"
2259 "jmp .Lamd64_equal_end\n\t"
2260 ".Lamd64_equal_true:\n\t"
2262 ".Lamd64_equal_end:\n\t"
2263 "lea 0x8(%rsp),%rsp");
2267 amd64_emit_less_signed (void)
2269 EMIT_ASM (amd64_less_signed
,
2270 "cmp %rax,(%rsp)\n\t"
2271 "jl .Lamd64_less_signed_true\n\t"
2273 "jmp .Lamd64_less_signed_end\n\t"
2274 ".Lamd64_less_signed_true:\n\t"
2276 ".Lamd64_less_signed_end:\n\t"
2277 "lea 0x8(%rsp),%rsp");
2281 amd64_emit_less_unsigned (void)
2283 EMIT_ASM (amd64_less_unsigned
,
2284 "cmp %rax,(%rsp)\n\t"
2285 "jb .Lamd64_less_unsigned_true\n\t"
2287 "jmp .Lamd64_less_unsigned_end\n\t"
2288 ".Lamd64_less_unsigned_true:\n\t"
2290 ".Lamd64_less_unsigned_end:\n\t"
2291 "lea 0x8(%rsp),%rsp");
2295 amd64_emit_ref (int size
)
2300 EMIT_ASM (amd64_ref1
,
2304 EMIT_ASM (amd64_ref2
,
2308 EMIT_ASM (amd64_ref4
,
2309 "movl (%rax),%eax");
2312 EMIT_ASM (amd64_ref8
,
2313 "movq (%rax),%rax");
2319 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2321 EMIT_ASM (amd64_if_goto
,
2325 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2333 amd64_emit_goto (int *offset_p
, int *size_p
)
2335 EMIT_ASM (amd64_goto
,
2336 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2344 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2346 int diff
= (to
- (from
+ size
));
2347 unsigned char buf
[sizeof (int)];
2355 memcpy (buf
, &diff
, sizeof (int));
2356 write_inferior_memory (from
, buf
, sizeof (int));
2360 amd64_emit_const (LONGEST num
)
2362 unsigned char buf
[16];
2364 CORE_ADDR buildaddr
= current_insn_ptr
;
2367 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2368 memcpy (&buf
[i
], &num
, sizeof (num
));
2370 append_insns (&buildaddr
, i
, buf
);
2371 current_insn_ptr
= buildaddr
;
2375 amd64_emit_call (CORE_ADDR fn
)
2377 unsigned char buf
[16];
2379 CORE_ADDR buildaddr
;
2382 /* The destination function being in the shared library, may be
2383 >31-bits away off the compiled code pad. */
2385 buildaddr
= current_insn_ptr
;
2387 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2391 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2393 /* Offset is too large for a call. Use callq, but that requires
2394 a register, so avoid it if possible. Use r10, since it is
2395 call-clobbered, we don't have to push/pop it. */
2396 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2398 memcpy (buf
+ i
, &fn
, 8);
2400 buf
[i
++] = 0xff; /* callq *%r10 */
2405 int offset32
= offset64
; /* we know we can't overflow here. */
2406 memcpy (buf
+ i
, &offset32
, 4);
2410 append_insns (&buildaddr
, i
, buf
);
2411 current_insn_ptr
= buildaddr
;
2415 amd64_emit_reg (int reg
)
2417 unsigned char buf
[16];
2419 CORE_ADDR buildaddr
;
2421 /* Assume raw_regs is still in %rdi. */
2422 buildaddr
= current_insn_ptr
;
2424 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2425 memcpy (&buf
[i
], ®
, sizeof (reg
));
2427 append_insns (&buildaddr
, i
, buf
);
2428 current_insn_ptr
= buildaddr
;
2429 amd64_emit_call (get_raw_reg_func_addr ());
2433 amd64_emit_pop (void)
2435 EMIT_ASM (amd64_pop
,
2440 amd64_emit_stack_flush (void)
2442 EMIT_ASM (amd64_stack_flush
,
2447 amd64_emit_zero_ext (int arg
)
2452 EMIT_ASM (amd64_zero_ext_8
,
2456 EMIT_ASM (amd64_zero_ext_16
,
2457 "and $0xffff,%rax");
2460 EMIT_ASM (amd64_zero_ext_32
,
2461 "mov $0xffffffff,%rcx\n\t"
2470 amd64_emit_swap (void)
2472 EMIT_ASM (amd64_swap
,
2479 amd64_emit_stack_adjust (int n
)
2481 unsigned char buf
[16];
2483 CORE_ADDR buildaddr
= current_insn_ptr
;
2486 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2490 /* This only handles adjustments up to 16, but we don't expect any more. */
2492 append_insns (&buildaddr
, i
, buf
);
2493 current_insn_ptr
= buildaddr
;
2496 /* FN's prototype is `LONGEST(*fn)(int)'. */
2499 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2501 unsigned char buf
[16];
2503 CORE_ADDR buildaddr
;
2505 buildaddr
= current_insn_ptr
;
2507 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2508 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2510 append_insns (&buildaddr
, i
, buf
);
2511 current_insn_ptr
= buildaddr
;
2512 amd64_emit_call (fn
);
2515 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2518 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2520 unsigned char buf
[16];
2522 CORE_ADDR buildaddr
;
2524 buildaddr
= current_insn_ptr
;
2526 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2527 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2529 append_insns (&buildaddr
, i
, buf
);
2530 current_insn_ptr
= buildaddr
;
2531 EMIT_ASM (amd64_void_call_2_a
,
2532 /* Save away a copy of the stack top. */
2534 /* Also pass top as the second argument. */
2536 amd64_emit_call (fn
);
2537 EMIT_ASM (amd64_void_call_2_b
,
2538 /* Restore the stack top, %rax may have been trashed. */
2543 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2546 "cmp %rax,(%rsp)\n\t"
2547 "jne .Lamd64_eq_fallthru\n\t"
2548 "lea 0x8(%rsp),%rsp\n\t"
2550 /* jmp, but don't trust the assembler to choose the right jump */
2551 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2552 ".Lamd64_eq_fallthru:\n\t"
2553 "lea 0x8(%rsp),%rsp\n\t"
2563 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2566 "cmp %rax,(%rsp)\n\t"
2567 "je .Lamd64_ne_fallthru\n\t"
2568 "lea 0x8(%rsp),%rsp\n\t"
2570 /* jmp, but don't trust the assembler to choose the right jump */
2571 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2572 ".Lamd64_ne_fallthru:\n\t"
2573 "lea 0x8(%rsp),%rsp\n\t"
2583 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2586 "cmp %rax,(%rsp)\n\t"
2587 "jnl .Lamd64_lt_fallthru\n\t"
2588 "lea 0x8(%rsp),%rsp\n\t"
2590 /* jmp, but don't trust the assembler to choose the right jump */
2591 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2592 ".Lamd64_lt_fallthru:\n\t"
2593 "lea 0x8(%rsp),%rsp\n\t"
2603 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2606 "cmp %rax,(%rsp)\n\t"
2607 "jnle .Lamd64_le_fallthru\n\t"
2608 "lea 0x8(%rsp),%rsp\n\t"
2610 /* jmp, but don't trust the assembler to choose the right jump */
2611 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2612 ".Lamd64_le_fallthru:\n\t"
2613 "lea 0x8(%rsp),%rsp\n\t"
2623 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2626 "cmp %rax,(%rsp)\n\t"
2627 "jng .Lamd64_gt_fallthru\n\t"
2628 "lea 0x8(%rsp),%rsp\n\t"
2630 /* jmp, but don't trust the assembler to choose the right jump */
2631 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2632 ".Lamd64_gt_fallthru:\n\t"
2633 "lea 0x8(%rsp),%rsp\n\t"
2643 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2646 "cmp %rax,(%rsp)\n\t"
2647 "jnge .Lamd64_ge_fallthru\n\t"
2648 ".Lamd64_ge_jump:\n\t"
2649 "lea 0x8(%rsp),%rsp\n\t"
2651 /* jmp, but don't trust the assembler to choose the right jump */
2652 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2653 ".Lamd64_ge_fallthru:\n\t"
2654 "lea 0x8(%rsp),%rsp\n\t"
2663 struct emit_ops amd64_emit_ops
=
2665 amd64_emit_prologue
,
2666 amd64_emit_epilogue
,
2671 amd64_emit_rsh_signed
,
2672 amd64_emit_rsh_unsigned
,
2680 amd64_emit_less_signed
,
2681 amd64_emit_less_unsigned
,
2685 amd64_write_goto_address
,
2690 amd64_emit_stack_flush
,
2691 amd64_emit_zero_ext
,
2693 amd64_emit_stack_adjust
,
2694 amd64_emit_int_call_1
,
2695 amd64_emit_void_call_2
,
2704 #endif /* __x86_64__ */
2707 i386_emit_prologue (void)
2709 EMIT_ASM32 (i386_prologue
,
2713 /* At this point, the raw regs base address is at 8(%ebp), and the
2714 value pointer is at 12(%ebp). */
2718 i386_emit_epilogue (void)
2720 EMIT_ASM32 (i386_epilogue
,
2721 "mov 12(%ebp),%ecx\n\t"
2722 "mov %eax,(%ecx)\n\t"
2723 "mov %ebx,0x4(%ecx)\n\t"
2731 i386_emit_add (void)
2733 EMIT_ASM32 (i386_add
,
2734 "add (%esp),%eax\n\t"
2735 "adc 0x4(%esp),%ebx\n\t"
2736 "lea 0x8(%esp),%esp");
2740 i386_emit_sub (void)
2742 EMIT_ASM32 (i386_sub
,
2743 "subl %eax,(%esp)\n\t"
2744 "sbbl %ebx,4(%esp)\n\t"
2750 i386_emit_mul (void)
2756 i386_emit_lsh (void)
2762 i386_emit_rsh_signed (void)
2768 i386_emit_rsh_unsigned (void)
2774 i386_emit_ext (int arg
)
2779 EMIT_ASM32 (i386_ext_8
,
2782 "movl %eax,%ebx\n\t"
2786 EMIT_ASM32 (i386_ext_16
,
2788 "movl %eax,%ebx\n\t"
2792 EMIT_ASM32 (i386_ext_32
,
2793 "movl %eax,%ebx\n\t"
2802 i386_emit_log_not (void)
2804 EMIT_ASM32 (i386_log_not
,
2806 "test %eax,%eax\n\t"
2813 i386_emit_bit_and (void)
2815 EMIT_ASM32 (i386_and
,
2816 "and (%esp),%eax\n\t"
2817 "and 0x4(%esp),%ebx\n\t"
2818 "lea 0x8(%esp),%esp");
2822 i386_emit_bit_or (void)
2824 EMIT_ASM32 (i386_or
,
2825 "or (%esp),%eax\n\t"
2826 "or 0x4(%esp),%ebx\n\t"
2827 "lea 0x8(%esp),%esp");
2831 i386_emit_bit_xor (void)
2833 EMIT_ASM32 (i386_xor
,
2834 "xor (%esp),%eax\n\t"
2835 "xor 0x4(%esp),%ebx\n\t"
2836 "lea 0x8(%esp),%esp");
2840 i386_emit_bit_not (void)
2842 EMIT_ASM32 (i386_bit_not
,
2843 "xor $0xffffffff,%eax\n\t"
2844 "xor $0xffffffff,%ebx\n\t");
2848 i386_emit_equal (void)
2850 EMIT_ASM32 (i386_equal
,
2851 "cmpl %ebx,4(%esp)\n\t"
2852 "jne .Li386_equal_false\n\t"
2853 "cmpl %eax,(%esp)\n\t"
2854 "je .Li386_equal_true\n\t"
2855 ".Li386_equal_false:\n\t"
2857 "jmp .Li386_equal_end\n\t"
2858 ".Li386_equal_true:\n\t"
2860 ".Li386_equal_end:\n\t"
2862 "lea 0x8(%esp),%esp");
2866 i386_emit_less_signed (void)
2868 EMIT_ASM32 (i386_less_signed
,
2869 "cmpl %ebx,4(%esp)\n\t"
2870 "jl .Li386_less_signed_true\n\t"
2871 "jne .Li386_less_signed_false\n\t"
2872 "cmpl %eax,(%esp)\n\t"
2873 "jl .Li386_less_signed_true\n\t"
2874 ".Li386_less_signed_false:\n\t"
2876 "jmp .Li386_less_signed_end\n\t"
2877 ".Li386_less_signed_true:\n\t"
2879 ".Li386_less_signed_end:\n\t"
2881 "lea 0x8(%esp),%esp");
2885 i386_emit_less_unsigned (void)
2887 EMIT_ASM32 (i386_less_unsigned
,
2888 "cmpl %ebx,4(%esp)\n\t"
2889 "jb .Li386_less_unsigned_true\n\t"
2890 "jne .Li386_less_unsigned_false\n\t"
2891 "cmpl %eax,(%esp)\n\t"
2892 "jb .Li386_less_unsigned_true\n\t"
2893 ".Li386_less_unsigned_false:\n\t"
2895 "jmp .Li386_less_unsigned_end\n\t"
2896 ".Li386_less_unsigned_true:\n\t"
2898 ".Li386_less_unsigned_end:\n\t"
2900 "lea 0x8(%esp),%esp");
2904 i386_emit_ref (int size
)
2909 EMIT_ASM32 (i386_ref1
,
2913 EMIT_ASM32 (i386_ref2
,
2917 EMIT_ASM32 (i386_ref4
,
2918 "movl (%eax),%eax");
2921 EMIT_ASM32 (i386_ref8
,
2922 "movl 4(%eax),%ebx\n\t"
2923 "movl (%eax),%eax");
2929 i386_emit_if_goto (int *offset_p
, int *size_p
)
2931 EMIT_ASM32 (i386_if_goto
,
2937 /* Don't trust the assembler to choose the right jump */
2938 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2941 *offset_p
= 11; /* be sure that this matches the sequence above */
2947 i386_emit_goto (int *offset_p
, int *size_p
)
2949 EMIT_ASM32 (i386_goto
,
2950 /* Don't trust the assembler to choose the right jump */
2951 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2959 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2961 int diff
= (to
- (from
+ size
));
2962 unsigned char buf
[sizeof (int)];
2964 /* We're only doing 4-byte sizes at the moment. */
2971 memcpy (buf
, &diff
, sizeof (int));
2972 write_inferior_memory (from
, buf
, sizeof (int));
2976 i386_emit_const (LONGEST num
)
2978 unsigned char buf
[16];
2980 CORE_ADDR buildaddr
= current_insn_ptr
;
2983 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2984 lo
= num
& 0xffffffff;
2985 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2987 hi
= ((num
>> 32) & 0xffffffff);
2990 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2991 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2996 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2998 append_insns (&buildaddr
, i
, buf
);
2999 current_insn_ptr
= buildaddr
;
3003 i386_emit_call (CORE_ADDR fn
)
3005 unsigned char buf
[16];
3007 CORE_ADDR buildaddr
;
3009 buildaddr
= current_insn_ptr
;
3011 buf
[i
++] = 0xe8; /* call <reladdr> */
3012 offset
= ((int) fn
) - (buildaddr
+ 5);
3013 memcpy (buf
+ 1, &offset
, 4);
3014 append_insns (&buildaddr
, 5, buf
);
3015 current_insn_ptr
= buildaddr
;
3019 i386_emit_reg (int reg
)
3021 unsigned char buf
[16];
3023 CORE_ADDR buildaddr
;
3025 EMIT_ASM32 (i386_reg_a
,
3027 buildaddr
= current_insn_ptr
;
3029 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3030 memcpy (&buf
[i
], ®
, sizeof (reg
));
3032 append_insns (&buildaddr
, i
, buf
);
3033 current_insn_ptr
= buildaddr
;
3034 EMIT_ASM32 (i386_reg_b
,
3035 "mov %eax,4(%esp)\n\t"
3036 "mov 8(%ebp),%eax\n\t"
3038 i386_emit_call (get_raw_reg_func_addr ());
3039 EMIT_ASM32 (i386_reg_c
,
3041 "lea 0x8(%esp),%esp");
3045 i386_emit_pop (void)
3047 EMIT_ASM32 (i386_pop
,
3053 i386_emit_stack_flush (void)
3055 EMIT_ASM32 (i386_stack_flush
,
3061 i386_emit_zero_ext (int arg
)
3066 EMIT_ASM32 (i386_zero_ext_8
,
3067 "and $0xff,%eax\n\t"
3071 EMIT_ASM32 (i386_zero_ext_16
,
3072 "and $0xffff,%eax\n\t"
3076 EMIT_ASM32 (i386_zero_ext_32
,
3085 i386_emit_swap (void)
3087 EMIT_ASM32 (i386_swap
,
3097 i386_emit_stack_adjust (int n
)
3099 unsigned char buf
[16];
3101 CORE_ADDR buildaddr
= current_insn_ptr
;
3104 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3108 append_insns (&buildaddr
, i
, buf
);
3109 current_insn_ptr
= buildaddr
;
3112 /* FN's prototype is `LONGEST(*fn)(int)'. */
3115 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3117 unsigned char buf
[16];
3119 CORE_ADDR buildaddr
;
3121 EMIT_ASM32 (i386_int_call_1_a
,
3122 /* Reserve a bit of stack space. */
3124 /* Put the one argument on the stack. */
3125 buildaddr
= current_insn_ptr
;
3127 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3130 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3132 append_insns (&buildaddr
, i
, buf
);
3133 current_insn_ptr
= buildaddr
;
3134 i386_emit_call (fn
);
3135 EMIT_ASM32 (i386_int_call_1_c
,
3137 "lea 0x8(%esp),%esp");
/* Emit i386 code that calls FN with ARG1 as the first argument and
   the current 64-bit top of stack (%eax:%ebx) as the second: preserve
   %eax, reserve 0x10 bytes of stack, copy %eax/%ebx into the second
   argument slots at 4(%esp)/8(%esp), hand-assemble a
   "movl $<arg1>,(%esp)" (opcode 0xc7) for the first argument, call
   FN, then release the 0x10 bytes and restore the original stack
   top.
   NOTE(review): extraction dropped lines here (embedded numbering
   skips e.g. 3151, 3161, 3163-3164, 3166, lines after 3172) -- the
   declaration of `i`, the ModRM byte, and the tail of the final
   EMIT_ASM32 (the %eax restore) are not visible.  */
3140 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3143 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3145 unsigned char buf
[16];
3147 CORE_ADDR buildaddr
;
3149 EMIT_ASM32 (i386_void_call_2_a
,
3150 /* Preserve %eax only; we don't have to worry about %ebx. */
3152 /* Reserve a bit of stack space for arguments. */
3153 "sub $0x10,%esp\n\t"
3154 /* Copy "top" to the second argument position. (Note that
3155 we can't assume function won't scribble on its
3156 arguments, so don't try to restore from this.) */
3157 "mov %eax,4(%esp)\n\t"
3158 "mov %ebx,8(%esp)");
3159 /* Put the first argument on the stack. */
3160 buildaddr
= current_insn_ptr
;
3162 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3165 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3167 append_insns (&buildaddr
, i
, buf
);
3168 current_insn_ptr
= buildaddr
;
3169 i386_emit_call (fn
);
3170 EMIT_ASM32 (i386_void_call_2_b
,
3171 "lea 0x10(%esp),%esp\n\t"
3172 /* Restore original stack top. */
/* Emit a conditional-goto comparing the 64-bit top of stack
   (%eax:%ebx) for equality against the next entry at (%esp)/4(%esp):
   on equality, fall into a hand-encoded 5-byte "jmp rel32"
   (0xe9 placeholder bytes) to be patched later; otherwise fall
   through past the .Leq_fallthru label.  Both paths pop the compared
   entry with "lea 0x8(%esp),%esp".
   NOTE(review): extraction dropped lines (embedded numbering skips
   3187-3188 and everything after 3192) -- the register pops and the
   *offset_p/*size_p stores reporting the jmp patch location are not
   visible; presumably OFFSET_P/SIZE_P are set as in the sibling
   emitters.  */
3178 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3181 /* Check low half first, more likely to be decider */
3182 "cmpl %eax,(%esp)\n\t"
3183 "jne .Leq_fallthru\n\t"
3184 "cmpl %ebx,4(%esp)\n\t"
3185 "jne .Leq_fallthru\n\t"
3186 "lea 0x8(%esp),%esp\n\t"
3189 /* jmp, but don't trust the assembler to choose the right jump */
3190 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3191 ".Leq_fallthru:\n\t"
3192 "lea 0x8(%esp),%esp\n\t"
/* Emit a conditional-goto taken when the 64-bit top of stack
   (%eax:%ebx) is NOT equal to the next entry: mirror of
   i386_emit_eq_goto with the branch senses inverted, ending in the
   same patchable 5-byte "jmp rel32" (0xe9).
   NOTE(review): extraction dropped lines (embedded numbering skips
   3208, 3211, 3213-3214, and everything after 3218) -- the branch
   after the first cmpl, the register pops, and the
   *offset_p/*size_p stores are not visible.  */
3203 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3206 /* Check low half first, more likely to be decider */
3207 "cmpl %eax,(%esp)\n\t"
3209 "cmpl %ebx,4(%esp)\n\t"
3210 "je .Lne_fallthru\n\t"
3212 "lea 0x8(%esp),%esp\n\t"
3215 /* jmp, but don't trust the assembler to choose the right jump */
3216 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3217 ".Lne_fallthru:\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
/* Emit a conditional-goto taken when the next stack entry is
   signed-less-than the top of stack: compare high halves first
   (4(%esp) vs %ebx), then low halves, ending in the patchable
   5-byte "jmp rel32" (0xe9).
   NOTE(review): extraction dropped lines (embedded numbering skips
   3233, 3237, 3239-3240, and everything after 3244) -- a branch
   between the two cmpl's (presumably a "jl" to the taken path), the
   register pops, and the *offset_p/*size_p stores are not
   visible.  */
3229 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3232 "cmpl %ebx,4(%esp)\n\t"
3234 "jne .Llt_fallthru\n\t"
3235 "cmpl %eax,(%esp)\n\t"
3236 "jnl .Llt_fallthru\n\t"
3238 "lea 0x8(%esp),%esp\n\t"
3241 /* jmp, but don't trust the assembler to choose the right jump */
3242 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3243 ".Llt_fallthru:\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
/* Emit a conditional-goto taken when the next stack entry is
   signed-less-or-equal to the top of stack: same shape as
   i386_emit_lt_goto but the low-half test uses "jnle".
   NOTE(review): extraction dropped lines (embedded numbering skips
   3259, 3263, 3265-3266, and everything after 3270) -- a branch
   between the two cmpl's, the register pops, and the
   *offset_p/*size_p stores are not visible.  */
3255 i386_emit_le_goto (int *offset_p
, int *size_p
)
3258 "cmpl %ebx,4(%esp)\n\t"
3260 "jne .Lle_fallthru\n\t"
3261 "cmpl %eax,(%esp)\n\t"
3262 "jnle .Lle_fallthru\n\t"
3264 "lea 0x8(%esp),%esp\n\t"
3267 /* jmp, but don't trust the assembler to choose the right jump */
3268 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3269 ".Lle_fallthru:\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
/* Emit a conditional-goto taken when the next stack entry is
   signed-greater-than the top of stack: same shape as the other
   comparison emitters, low-half test "jng" to fall through.
   NOTE(review): extraction dropped lines (embedded numbering skips
   3285, 3289, 3291-3292, and everything after 3296) -- a branch
   between the two cmpl's, the register pops, and the
   *offset_p/*size_p stores are not visible.  */
3281 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3284 "cmpl %ebx,4(%esp)\n\t"
3286 "jne .Lgt_fallthru\n\t"
3287 "cmpl %eax,(%esp)\n\t"
3288 "jng .Lgt_fallthru\n\t"
3290 "lea 0x8(%esp),%esp\n\t"
3293 /* jmp, but don't trust the assembler to choose the right jump */
3294 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3295 ".Lgt_fallthru:\n\t"
3296 "lea 0x8(%esp),%esp\n\t"
/* Emit a conditional-goto taken when the next stack entry is
   signed-greater-or-equal to the top of stack: same shape as the
   other comparison emitters, low-half test "jnge" to fall through.
   NOTE(review): extraction dropped lines (embedded numbering skips
   3311, 3315, 3317-3318, and everything after 3322) -- a branch
   between the two cmpl's, the register pops, and the
   *offset_p/*size_p stores are not visible.  */
3307 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3310 "cmpl %ebx,4(%esp)\n\t"
3312 "jne .Lge_fallthru\n\t"
3313 "cmpl %eax,(%esp)\n\t"
3314 "jnge .Lge_fallthru\n\t"
3316 "lea 0x8(%esp),%esp\n\t"
3319 /* jmp, but don't trust the assembler to choose the right jump */
3320 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3321 ".Lge_fallthru:\n\t"
3322 "lea 0x8(%esp),%esp\n\t"
/* Dispatch table of i386 bytecode-compilation callbacks, returned by
   x86_emit_ops for 32-bit inferiors; each entry points at one of the
   i386_emit_* functions above.
   NOTE(review): extraction dropped most of the initializer (embedded
   numbering skips 3333-3339, 3342-3348, 3351-3353, 3355-3358,
   3360-3361, 3365+) -- only a subset of the fields is visible, and
   the closing brace is missing.  */
3332 struct emit_ops i386_emit_ops
=
3340 i386_emit_rsh_signed
,
3341 i386_emit_rsh_unsigned
,
3349 i386_emit_less_signed
,
3350 i386_emit_less_unsigned
,
3354 i386_write_goto_address
,
3359 i386_emit_stack_flush
,
3362 i386_emit_stack_adjust
,
3363 i386_emit_int_call_1
,
3364 i386_emit_void_call_2
,
/* Select the bytecode-compilation table for the current inferior:
   the amd64 table when is_64bit_tdesc () reports a 64-bit target
   description, the i386 table otherwise.
   NOTE(review): the function's name line was dropped by extraction
   (embedded numbering skips 3375-3377) -- presumably this is
   x86_emit_ops, the emit_ops hook of the_low_target; confirm against
   the original.  */
3374 static struct emit_ops
*
3378 if (is_64bit_tdesc ())
3379 return &amd64_emit_ops
;
3382 return &i386_emit_ops
;
/* Report whether this target supports range stepping.
   NOTE(review): the return type and entire body were dropped by
   extraction (only the signature line survives); presumably it simply
   returns a constant -- confirm against the original.  */
3386 x86_supports_range_stepping (void)
/* The x86 linux_target_ops vtable consumed by the generic
   gdbserver/linux-low code: register info, watchpoint/breakpoint
   hooks, per-process/thread lifecycle hooks, and fast-tracepoint
   support entries.
   NOTE(review): extraction dropped many initializer lines (embedded
   numbering skips 3395-3396, 3401-3407, 3409-3410, 3416-3419, 3427,
   3430+) -- several fields and the closing brace are not visible, so
   the positional correspondence of the surviving entries cannot be
   fully checked here.  */
3391 /* This is initialized assuming an amd64 target.
3392 x86_arch_setup will correct it for i386 or amd64 targets. */
3394 struct linux_target_ops the_low_target
=
3397 x86_linux_regs_info
,
3398 x86_cannot_fetch_register
,
3399 x86_cannot_store_register
,
3400 NULL
, /* fetch_register */
3408 x86_supports_z_point_type
,
3411 x86_stopped_by_watchpoint
,
3412 x86_stopped_data_address
,
3413 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3414 native i386 case (no registers smaller than an xfer unit), and are not
3415 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3418 /* need to fix up i386 siginfo if host is amd64 */
3420 x86_linux_new_process
,
3421 x86_linux_new_thread
,
3422 x86_linux_prepare_to_resume
,
3423 x86_linux_process_qsupported
,
3424 x86_supports_tracepoints
,
3425 x86_get_thread_area
,
3426 x86_install_fast_tracepoint_jump_pad
,
3428 x86_get_min_fast_tracepoint_insn_len
,
3429 x86_supports_range_stepping
,
3433 initialize_low_arch (void)
3435 /* Initialize the Linux target descriptions. */
3437 init_registers_amd64_linux ();
3438 init_registers_amd64_avx_linux ();
3439 init_registers_amd64_avx512_linux ();
3440 init_registers_amd64_mpx_linux ();
3442 init_registers_x32_linux ();
3443 init_registers_x32_avx_linux ();
3444 init_registers_x32_avx512_linux ();
3446 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3447 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3448 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3450 init_registers_i386_linux ();
3451 init_registers_i386_mmx_linux ();
3452 init_registers_i386_avx_linux ();
3453 init_registers_i386_avx512_linux ();
3454 init_registers_i386_mpx_linux ();
3456 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3457 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3458 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3460 initialize_regsets_info (&x86_regsets_info
);