1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-avx512-linux.c. */
52 void init_registers_amd64_avx512_linux (void);
53 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
55 /* Defined in auto-generated file amd64-mpx-linux.c. */
56 void init_registers_amd64_mpx_linux (void);
57 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
59 /* Defined in auto-generated file x32-linux.c. */
60 void init_registers_x32_linux (void);
61 extern const struct target_desc
*tdesc_x32_linux
;
63 /* Defined in auto-generated file x32-avx-linux.c. */
64 void init_registers_x32_avx_linux (void);
65 extern const struct target_desc
*tdesc_x32_avx_linux
;
67 /* Defined in auto-generated file x32-avx512-linux.c. */
68 void init_registers_x32_avx512_linux (void);
69 extern const struct target_desc
*tdesc_x32_avx512_linux
;
73 /* Defined in auto-generated file i386-linux.c. */
74 void init_registers_i386_linux (void);
75 extern const struct target_desc
*tdesc_i386_linux
;
77 /* Defined in auto-generated file i386-mmx-linux.c. */
78 void init_registers_i386_mmx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mmx_linux
;
81 /* Defined in auto-generated file i386-avx-linux.c. */
82 void init_registers_i386_avx_linux (void);
83 extern const struct target_desc
*tdesc_i386_avx_linux
;
85 /* Defined in auto-generated file i386-avx512-linux.c. */
86 void init_registers_i386_avx512_linux (void);
87 extern const struct target_desc
*tdesc_i386_avx512_linux
;
89 /* Defined in auto-generated file i386-mpx-linux.c. */
90 void init_registers_i386_mpx_linux (void);
91 extern const struct target_desc
*tdesc_i386_mpx_linux
;
94 static struct target_desc
*tdesc_amd64_linux_no_xml
;
96 static struct target_desc
*tdesc_i386_linux_no_xml
;
99 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
100 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
102 /* Backward compatibility for gdb without XML support. */
104 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
105 <architecture>i386</architecture>\
106 <osabi>GNU/Linux</osabi>\
110 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
111 <architecture>i386:x86-64</architecture>\
112 <osabi>GNU/Linux</osabi>\
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
121 #ifndef PTRACE_GETREGSET
122 #define PTRACE_GETREGSET 0x4204
125 #ifndef PTRACE_SETREGSET
126 #define PTRACE_SETREGSET 0x4205
130 #ifndef PTRACE_GET_THREAD_AREA
131 #define PTRACE_GET_THREAD_AREA 25
134 /* This definition comes from prctl.h, but some kernels may not have it. */
135 #ifndef PTRACE_ARCH_PRCTL
136 #define PTRACE_ARCH_PRCTL 30
139 /* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
142 #define ARCH_SET_GS 0x1001
143 #define ARCH_SET_FS 0x1002
144 #define ARCH_GET_FS 0x1003
145 #define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct i386_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
181 static const int x86_64_regmap
[] =
183 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
184 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
185 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
186 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
187 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
188 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
208 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
210 #else /* ! __x86_64__ */
212 /* Mapping between the general-purpose registers in `struct user'
213 format and GDB's register array layout. */
214 static /*const*/ int i386_regmap
[] =
216 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
217 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
218 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
219 DS
* 4, ES
* 4, FS
* 4, GS
* 4
222 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
228 /* Returns true if the current inferior belongs to a x86-64 process,
232 is_64bit_tdesc (void)
234 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
236 return register_size (regcache
->tdesc
, 0) == 8;
242 /* Called by libthread_db. */
245 ps_get_thread_area (const struct ps_prochandle
*ph
,
246 lwpid_t lwpid
, int idx
, void **base
)
249 int use_64bit
= is_64bit_tdesc ();
256 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
260 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
271 unsigned int desc
[4];
273 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
274 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
277 /* Ensure we properly extend the value to 64-bits for x86_64. */
278 *base
= (void *) (uintptr_t) desc
[1];
283 /* Get the thread area address. This is used to recognize which
284 thread is which when tracing with the in-process agent library. We
285 don't read anything from the address, and treat it as opaque; it's
286 the address itself that we assume is unique per-thread. */
289 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
292 int use_64bit
= is_64bit_tdesc ();
297 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
299 *addr
= (CORE_ADDR
) (uintptr_t) base
;
308 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
309 struct thread_info
*thr
= get_lwp_thread (lwp
);
310 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
311 unsigned int desc
[4];
313 const int reg_thread_area
= 3; /* bits to scale down register value. */
316 collect_register_by_name (regcache
, "gs", &gs
);
318 idx
= gs
>> reg_thread_area
;
320 if (ptrace (PTRACE_GET_THREAD_AREA
,
322 (void *) (long) idx
, (unsigned long) &desc
) < 0)
333 x86_cannot_store_register (int regno
)
336 if (is_64bit_tdesc ())
340 return regno
>= I386_NUM_REGS
;
344 x86_cannot_fetch_register (int regno
)
347 if (is_64bit_tdesc ())
351 return regno
>= I386_NUM_REGS
;
355 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
360 if (register_size (regcache
->tdesc
, 0) == 8)
362 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
363 if (x86_64_regmap
[i
] != -1)
364 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
369 for (i
= 0; i
< I386_NUM_REGS
; i
++)
370 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
372 collect_register_by_name (regcache
, "orig_eax",
373 ((char *) buf
) + ORIG_EAX
* 4);
377 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
382 if (register_size (regcache
->tdesc
, 0) == 8)
384 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
385 if (x86_64_regmap
[i
] != -1)
386 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
391 for (i
= 0; i
< I386_NUM_REGS
; i
++)
392 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
394 supply_register_by_name (regcache
, "orig_eax",
395 ((char *) buf
) + ORIG_EAX
* 4);
/* Write the floating-point registers from REGCACHE into BUF, in
   fxsave layout on 64-bit hosts and fsave layout otherwise.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Read the floating-point registers from BUF into REGCACHE.  Inverse
   of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Write the extended floating-point (fxsave) registers from REGCACHE
   into BUF.  Only used on 32-bit hosts (PTRACE_GETFPXREGS).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Read the extended floating-point (fxsave) registers from BUF into
   REGCACHE.  Inverse of x86_fill_fpxregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Write the XSAVE extended state registers from REGCACHE into BUF
   (used with PTRACE_SETREGSET / NT_X86_XSTATE).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Read the XSAVE extended state registers from BUF into REGCACHE.
   Inverse of x86_fill_xstateregset.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
446 /* ??? The non-biarch i386 case stores all the i387 regs twice.
447 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
448 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
449 doesn't work. IWBN to avoid the duplication in the case where it
450 does work. Maybe the arch_setup routine could check whether it works
451 and update the supported regsets accordingly. */
453 static struct regset_info x86_regsets
[] =
455 #ifdef HAVE_PTRACE_GETREGS
456 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
458 x86_fill_gregset
, x86_store_gregset
},
459 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
460 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
462 # ifdef HAVE_PTRACE_GETFPXREGS
463 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
465 x86_fill_fpxregset
, x86_store_fpxregset
},
468 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
470 x86_fill_fpregset
, x86_store_fpregset
},
471 #endif /* HAVE_PTRACE_GETREGS */
472 { 0, 0, 0, -1, -1, NULL
, NULL
}
476 x86_get_pc (struct regcache
*regcache
)
478 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
483 collect_register_by_name (regcache
, "rip", &pc
);
484 return (CORE_ADDR
) pc
;
489 collect_register_by_name (regcache
, "eip", &pc
);
490 return (CORE_ADDR
) pc
;
495 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
497 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
501 unsigned long newpc
= pc
;
502 supply_register_by_name (regcache
, "rip", &newpc
);
506 unsigned int newpc
= pc
;
507 supply_register_by_name (regcache
, "eip", &newpc
);
511 static const unsigned char x86_breakpoint
[] = { 0xCC };
512 #define x86_breakpoint_len 1
515 x86_breakpoint_at (CORE_ADDR pc
)
519 (*the_target
->read_memory
) (pc
, &c
, 1);
526 /* Support for debug registers. */
529 x86_linux_dr_get (ptid_t ptid
, int regnum
)
534 tid
= ptid_get_lwp (ptid
);
537 value
= ptrace (PTRACE_PEEKUSER
, tid
,
538 offsetof (struct user
, u_debugreg
[regnum
]), 0);
540 error ("Couldn't read debug register");
546 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
550 tid
= ptid_get_lwp (ptid
);
553 ptrace (PTRACE_POKEUSER
, tid
,
554 offsetof (struct user
, u_debugreg
[regnum
]), value
);
556 error ("Couldn't write debug register");
560 update_debug_registers_callback (struct inferior_list_entry
*entry
,
563 struct thread_info
*thr
= (struct thread_info
*) entry
;
564 struct lwp_info
*lwp
= get_thread_lwp (thr
);
565 int pid
= *(int *) pid_p
;
567 /* Only update the threads of this process. */
568 if (pid_of (thr
) == pid
)
570 /* The actual update is done later just before resuming the lwp,
571 we just mark that the registers need updating. */
572 lwp
->arch_private
->debug_registers_changed
= 1;
574 /* If the lwp isn't stopped, force it to momentarily pause, so
575 we can update its debug registers. */
577 linux_stop_lwp (lwp
);
583 /* Update the inferior's debug register REGNUM from STATE. */
586 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
588 /* Only update the threads of this process. */
589 int pid
= pid_of (current_inferior
);
591 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
592 fatal ("Invalid debug register %d", regnum
);
594 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
597 /* Return the inferior's debug register REGNUM. */
600 i386_dr_low_get_addr (int regnum
)
602 ptid_t ptid
= ptid_of (current_inferior
);
604 /* DR6 and DR7 are retrieved with some other way. */
605 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
607 return x86_linux_dr_get (ptid
, regnum
);
610 /* Update the inferior's DR7 debug control register from STATE. */
613 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
615 /* Only update the threads of this process. */
616 int pid
= pid_of (current_inferior
);
618 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
621 /* Return the inferior's DR7 debug control register. */
624 i386_dr_low_get_control (void)
626 ptid_t ptid
= ptid_of (current_inferior
);
628 return x86_linux_dr_get (ptid
, DR_CONTROL
);
631 /* Get the value of the DR6 debug status register from the inferior
632 and record it in STATE. */
635 i386_dr_low_get_status (void)
637 ptid_t ptid
= ptid_of (current_inferior
);
639 return x86_linux_dr_get (ptid
, DR_STATUS
);
642 /* Breakpoint/Watchpoint support. */
645 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
647 struct process_info
*proc
= current_process ();
650 case '0': /* software-breakpoint */
654 ret
= prepare_to_access_memory ();
657 ret
= set_gdb_breakpoint_at (addr
);
658 done_accessing_memory ();
661 case '1': /* hardware-breakpoint */
662 case '2': /* write watchpoint */
663 case '3': /* read watchpoint */
664 case '4': /* access watchpoint */
666 enum target_hw_bp_type hw_type
= Z_packet_to_hw_type (type
);
667 struct i386_debug_reg_state
*state
668 = &proc
->private->arch_private
->debug_reg_state
;
670 return i386_low_insert_watchpoint (state
, hw_type
, addr
, len
);
680 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
682 struct process_info
*proc
= current_process ();
685 case '0': /* software-breakpoint */
689 ret
= prepare_to_access_memory ();
692 ret
= delete_gdb_breakpoint_at (addr
);
693 done_accessing_memory ();
696 case '1': /* hardware-breakpoint */
697 case '2': /* write watchpoint */
698 case '3': /* read watchpoint */
699 case '4': /* access watchpoint */
701 enum target_hw_bp_type hw_type
= Z_packet_to_hw_type (type
);
702 struct i386_debug_reg_state
*state
703 = &proc
->private->arch_private
->debug_reg_state
;
705 return i386_low_remove_watchpoint (state
, hw_type
, addr
, len
);
714 x86_stopped_by_watchpoint (void)
716 struct process_info
*proc
= current_process ();
717 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
721 x86_stopped_data_address (void)
723 struct process_info
*proc
= current_process ();
725 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
731 /* Called when a new process is created. */
733 static struct arch_process_info
*
734 x86_linux_new_process (void)
736 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
738 i386_low_init_dregs (&info
->debug_reg_state
);
743 /* Called when a new thread is detected. */
745 static struct arch_lwp_info
*
746 x86_linux_new_thread (void)
748 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
750 info
->debug_registers_changed
= 1;
755 /* Called when resuming a thread.
756 If the debug regs have changed, update the thread's copies. */
759 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
761 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
762 int clear_status
= 0;
764 if (lwp
->arch_private
->debug_registers_changed
)
767 int pid
= ptid_get_pid (ptid
);
768 struct process_info
*proc
= find_process_pid (pid
);
769 struct i386_debug_reg_state
*state
770 = &proc
->private->arch_private
->debug_reg_state
;
772 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
773 if (state
->dr_ref_count
[i
] > 0)
775 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
777 /* If we're setting a watchpoint, any change the inferior
778 had done itself to the debug registers needs to be
779 discarded, otherwise, i386_low_stopped_data_address can
784 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
786 lwp
->arch_private
->debug_registers_changed
= 0;
789 if (clear_status
|| lwp
->stopped_by_watchpoint
)
790 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
793 /* When GDBSERVER is built as a 64-bit application on linux, the
794 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
795 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
796 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
797 conversion in-place ourselves. */
799 /* These types below (compat_*) define a siginfo type that is layout
800 compatible with the siginfo type exported by the 32-bit userspace
805 typedef int compat_int_t
;
806 typedef unsigned int compat_uptr_t
;
808 typedef int compat_time_t
;
809 typedef int compat_timer_t
;
810 typedef int compat_clock_t
;
812 struct compat_timeval
814 compat_time_t tv_sec
;
818 typedef union compat_sigval
820 compat_int_t sival_int
;
821 compat_uptr_t sival_ptr
;
824 typedef struct compat_siginfo
832 int _pad
[((128 / sizeof (int)) - 3)];
841 /* POSIX.1b timers */
846 compat_sigval_t _sigval
;
849 /* POSIX.1b signals */
854 compat_sigval_t _sigval
;
863 compat_clock_t _utime
;
864 compat_clock_t _stime
;
867 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
882 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
883 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
885 typedef struct compat_x32_siginfo
893 int _pad
[((128 / sizeof (int)) - 3)];
902 /* POSIX.1b timers */
907 compat_sigval_t _sigval
;
910 /* POSIX.1b signals */
915 compat_sigval_t _sigval
;
924 compat_x32_clock_t _utime
;
925 compat_x32_clock_t _stime
;
928 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
941 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
943 #define cpt_si_pid _sifields._kill._pid
944 #define cpt_si_uid _sifields._kill._uid
945 #define cpt_si_timerid _sifields._timer._tid
946 #define cpt_si_overrun _sifields._timer._overrun
947 #define cpt_si_status _sifields._sigchld._status
948 #define cpt_si_utime _sifields._sigchld._utime
949 #define cpt_si_stime _sifields._sigchld._stime
950 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
951 #define cpt_si_addr _sifields._sigfault._addr
952 #define cpt_si_band _sifields._sigpoll._band
953 #define cpt_si_fd _sifields._sigpoll._fd
955 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
956 In their place is si_timer1,si_timer2. */
958 #define si_timerid si_timer1
961 #define si_overrun si_timer2
965 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
967 memset (to
, 0, sizeof (*to
));
969 to
->si_signo
= from
->si_signo
;
970 to
->si_errno
= from
->si_errno
;
971 to
->si_code
= from
->si_code
;
973 if (to
->si_code
== SI_TIMER
)
975 to
->cpt_si_timerid
= from
->si_timerid
;
976 to
->cpt_si_overrun
= from
->si_overrun
;
977 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
979 else if (to
->si_code
== SI_USER
)
981 to
->cpt_si_pid
= from
->si_pid
;
982 to
->cpt_si_uid
= from
->si_uid
;
984 else if (to
->si_code
< 0)
986 to
->cpt_si_pid
= from
->si_pid
;
987 to
->cpt_si_uid
= from
->si_uid
;
988 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
992 switch (to
->si_signo
)
995 to
->cpt_si_pid
= from
->si_pid
;
996 to
->cpt_si_uid
= from
->si_uid
;
997 to
->cpt_si_status
= from
->si_status
;
998 to
->cpt_si_utime
= from
->si_utime
;
999 to
->cpt_si_stime
= from
->si_stime
;
1005 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1008 to
->cpt_si_band
= from
->si_band
;
1009 to
->cpt_si_fd
= from
->si_fd
;
1012 to
->cpt_si_pid
= from
->si_pid
;
1013 to
->cpt_si_uid
= from
->si_uid
;
1014 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1021 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1023 memset (to
, 0, sizeof (*to
));
1025 to
->si_signo
= from
->si_signo
;
1026 to
->si_errno
= from
->si_errno
;
1027 to
->si_code
= from
->si_code
;
1029 if (to
->si_code
== SI_TIMER
)
1031 to
->si_timerid
= from
->cpt_si_timerid
;
1032 to
->si_overrun
= from
->cpt_si_overrun
;
1033 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1035 else if (to
->si_code
== SI_USER
)
1037 to
->si_pid
= from
->cpt_si_pid
;
1038 to
->si_uid
= from
->cpt_si_uid
;
1040 else if (to
->si_code
< 0)
1042 to
->si_pid
= from
->cpt_si_pid
;
1043 to
->si_uid
= from
->cpt_si_uid
;
1044 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1048 switch (to
->si_signo
)
1051 to
->si_pid
= from
->cpt_si_pid
;
1052 to
->si_uid
= from
->cpt_si_uid
;
1053 to
->si_status
= from
->cpt_si_status
;
1054 to
->si_utime
= from
->cpt_si_utime
;
1055 to
->si_stime
= from
->cpt_si_stime
;
1061 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1064 to
->si_band
= from
->cpt_si_band
;
1065 to
->si_fd
= from
->cpt_si_fd
;
1068 to
->si_pid
= from
->cpt_si_pid
;
1069 to
->si_uid
= from
->cpt_si_uid
;
1070 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1077 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1080 memset (to
, 0, sizeof (*to
));
1082 to
->si_signo
= from
->si_signo
;
1083 to
->si_errno
= from
->si_errno
;
1084 to
->si_code
= from
->si_code
;
1086 if (to
->si_code
== SI_TIMER
)
1088 to
->cpt_si_timerid
= from
->si_timerid
;
1089 to
->cpt_si_overrun
= from
->si_overrun
;
1090 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1092 else if (to
->si_code
== SI_USER
)
1094 to
->cpt_si_pid
= from
->si_pid
;
1095 to
->cpt_si_uid
= from
->si_uid
;
1097 else if (to
->si_code
< 0)
1099 to
->cpt_si_pid
= from
->si_pid
;
1100 to
->cpt_si_uid
= from
->si_uid
;
1101 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1105 switch (to
->si_signo
)
1108 to
->cpt_si_pid
= from
->si_pid
;
1109 to
->cpt_si_uid
= from
->si_uid
;
1110 to
->cpt_si_status
= from
->si_status
;
1111 to
->cpt_si_utime
= from
->si_utime
;
1112 to
->cpt_si_stime
= from
->si_stime
;
1118 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1121 to
->cpt_si_band
= from
->si_band
;
1122 to
->cpt_si_fd
= from
->si_fd
;
1125 to
->cpt_si_pid
= from
->si_pid
;
1126 to
->cpt_si_uid
= from
->si_uid
;
1127 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1134 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1135 compat_x32_siginfo_t
*from
)
1137 memset (to
, 0, sizeof (*to
));
1139 to
->si_signo
= from
->si_signo
;
1140 to
->si_errno
= from
->si_errno
;
1141 to
->si_code
= from
->si_code
;
1143 if (to
->si_code
== SI_TIMER
)
1145 to
->si_timerid
= from
->cpt_si_timerid
;
1146 to
->si_overrun
= from
->cpt_si_overrun
;
1147 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1149 else if (to
->si_code
== SI_USER
)
1151 to
->si_pid
= from
->cpt_si_pid
;
1152 to
->si_uid
= from
->cpt_si_uid
;
1154 else if (to
->si_code
< 0)
1156 to
->si_pid
= from
->cpt_si_pid
;
1157 to
->si_uid
= from
->cpt_si_uid
;
1158 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1162 switch (to
->si_signo
)
1165 to
->si_pid
= from
->cpt_si_pid
;
1166 to
->si_uid
= from
->cpt_si_uid
;
1167 to
->si_status
= from
->cpt_si_status
;
1168 to
->si_utime
= from
->cpt_si_utime
;
1169 to
->si_stime
= from
->cpt_si_stime
;
1175 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1178 to
->si_band
= from
->cpt_si_band
;
1179 to
->si_fd
= from
->cpt_si_fd
;
1182 to
->si_pid
= from
->cpt_si_pid
;
1183 to
->si_uid
= from
->cpt_si_uid
;
1184 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1190 #endif /* __x86_64__ */
1192 /* Convert a native/host siginfo object, into/from the siginfo in the
1193 layout of the inferiors' architecture. Returns true if any
1194 conversion was done; false otherwise. If DIRECTION is 1, then copy
1195 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1199 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1202 unsigned int machine
;
1203 int tid
= lwpid_of (current_inferior
);
1204 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1206 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1207 if (!is_64bit_tdesc ())
1209 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1210 fatal ("unexpected difference in siginfo");
1213 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1215 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1219 /* No fixup for native x32 GDB. */
1220 else if (!is_elf64
&& sizeof (void *) == 8)
1222 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1223 fatal ("unexpected difference in siginfo");
1226 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1229 siginfo_from_compat_x32_siginfo (native
,
1230 (struct compat_x32_siginfo
*) inf
);
1241 /* Format of XSAVE extended state is:
1244 fxsave_bytes[0..463]
1245 sw_usable_bytes[464..511]
1246 xstate_hdr_bytes[512..575]
1251 Same memory layout will be used for the coredump NT_X86_XSTATE
1252 representing the XSAVE extended state registers.
1254 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1255 extended state mask, which is the same as the extended control register
1256 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1257 together with the mask saved in the xstate_hdr_bytes to determine what
1258 states the processor/OS supports and what state, used or initialized,
1259 the process/thread is in. */
1260 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1262 /* Does the current host support the GETFPXREGS request? The header
1263 file may or may not define it, and even if it is defined, the
1264 kernel will return EIO if it's running on a pre-SSE processor. */
1265 int have_ptrace_getfpxregs
=
1266 #ifdef HAVE_PTRACE_GETFPXREGS
1273 /* Does the current host support PTRACE_GETREGSET? */
1274 static int have_ptrace_getregset
= -1;
1276 /* Get Linux/x86 target description from running target. */
1278 static const struct target_desc
*
1279 x86_linux_read_description (void)
1281 unsigned int machine
;
1285 static uint64_t xcr0
;
1286 struct regset_info
*regset
;
1288 tid
= lwpid_of (current_inferior
);
1290 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1292 if (sizeof (void *) == 4)
1295 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1297 else if (machine
== EM_X86_64
)
1298 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1302 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1303 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1305 elf_fpxregset_t fpxregs
;
1307 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1309 have_ptrace_getfpxregs
= 0;
1310 have_ptrace_getregset
= 0;
1311 return tdesc_i386_mmx_linux
;
1314 have_ptrace_getfpxregs
= 1;
1320 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1322 /* Don't use XML. */
1324 if (machine
== EM_X86_64
)
1325 return tdesc_amd64_linux_no_xml
;
1328 return tdesc_i386_linux_no_xml
;
1331 if (have_ptrace_getregset
== -1)
1333 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1336 iov
.iov_base
= xstateregs
;
1337 iov
.iov_len
= sizeof (xstateregs
);
1339 /* Check if PTRACE_GETREGSET works. */
1340 if (ptrace (PTRACE_GETREGSET
, tid
,
1341 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1342 have_ptrace_getregset
= 0;
1345 have_ptrace_getregset
= 1;
1347 /* Get XCR0 from XSAVE extended state. */
1348 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1349 / sizeof (uint64_t))];
1351 /* Use PTRACE_GETREGSET if it is available. */
1352 for (regset
= x86_regsets
;
1353 regset
->fill_function
!= NULL
; regset
++)
1354 if (regset
->get_request
== PTRACE_GETREGSET
)
1355 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1356 else if (regset
->type
!= GENERAL_REGS
)
1361 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1362 xcr0_features
= (have_ptrace_getregset
1363 && (xcr0
& I386_XSTATE_ALL_MASK
));
1368 if (machine
== EM_X86_64
)
1375 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1377 case I386_XSTATE_AVX512_MASK
:
1378 return tdesc_amd64_avx512_linux
;
1380 case I386_XSTATE_MPX_MASK
:
1381 return tdesc_amd64_mpx_linux
;
1383 case I386_XSTATE_AVX_MASK
:
1384 return tdesc_amd64_avx_linux
;
1387 return tdesc_amd64_linux
;
1391 return tdesc_amd64_linux
;
1397 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1399 case I386_XSTATE_AVX512_MASK
:
1400 return tdesc_x32_avx512_linux
;
1402 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1403 case I386_XSTATE_AVX_MASK
:
1404 return tdesc_x32_avx_linux
;
1407 return tdesc_x32_linux
;
1411 return tdesc_x32_linux
;
1419 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1421 case (I386_XSTATE_AVX512_MASK
):
1422 return tdesc_i386_avx512_linux
;
1424 case (I386_XSTATE_MPX_MASK
):
1425 return tdesc_i386_mpx_linux
;
1427 case (I386_XSTATE_AVX_MASK
):
1428 return tdesc_i386_avx_linux
;
1431 return tdesc_i386_linux
;
1435 return tdesc_i386_linux
;
1438 gdb_assert_not_reached ("failed to return tdesc");
1441 /* Callback for find_inferior. Stops iteration when a thread with a
1442 given PID is found. */
1445 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1447 int pid
= *(int *) data
;
1449 return (ptid_get_pid (entry
->id
) == pid
);
1452 /* Callback for for_each_inferior. Calls the arch_setup routine for
1456 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1458 int pid
= ptid_get_pid (entry
->id
);
1460 /* Look up any thread of this processes. */
1462 = (struct thread_info
*) find_inferior (&all_threads
,
1463 same_process_callback
, &pid
);
1465 the_low_target
.arch_setup ();
1468 /* Update all the target description of all processes; a new GDB
1469 connected, and it may or not support xml target descriptions. */
1472 x86_linux_update_xmltarget (void)
1474 struct thread_info
*save_inferior
= current_inferior
;
1476 /* Before changing the register cache's internal layout, flush the
1477 contents of the current valid caches back to the threads, and
1478 release the current regcache objects. */
1479 regcache_release ();
1481 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1483 current_inferior
= save_inferior
;
1486 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1487 PTRACE_GETREGSET. */
1490 x86_linux_process_qsupported (const char *query
)
1492 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1493 with "i386" in qSupported query, it supports x86 XML target
1496 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1498 char *copy
= xstrdup (query
+ 13);
1501 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1503 if (strcmp (p
, "i386") == 0)
1513 x86_linux_update_xmltarget ();
1516 /* Common for x86/x86-64. */
1518 static struct regsets_info x86_regsets_info
=
1520 x86_regsets
, /* regsets */
1521 0, /* num_regsets */
1522 NULL
, /* disabled_regsets */
1526 static struct regs_info amd64_linux_regs_info
=
1528 NULL
, /* regset_bitmap */
1529 NULL
, /* usrregs_info */
1533 static struct usrregs_info i386_linux_usrregs_info
=
1539 static struct regs_info i386_linux_regs_info
=
1541 NULL
, /* regset_bitmap */
1542 &i386_linux_usrregs_info
,
1546 const struct regs_info
*
1547 x86_linux_regs_info (void)
1550 if (is_64bit_tdesc ())
1551 return &amd64_linux_regs_info
;
1554 return &i386_linux_regs_info
;
1557 /* Initialize the target description for the architecture of the
1561 x86_arch_setup (void)
1563 current_process ()->tdesc
= x86_linux_read_description ();
/* This backend can emit fast tracepoint jump pads (see the
   *_install_fast_tracepoint_jump_pad routines below), so advertise
   tracepoint support unconditionally.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1573 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1575 write_inferior_memory (*to
, buf
, len
);
/* Convert OP, a string of whitespace-separated hex byte values
   (e.g. "48 83 ec 18"), into raw bytes stored at BUF.  Returns the
   number of bytes written.  Parsing stops at the first token strtoul
   cannot consume.  BUF must be large enough for all bytes in OP.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1601 /* Build a jump pad that saves registers and calls a collection
1602 function. Writes a jump instruction to the jump pad to
1603 JJUMPAD_INSN. The caller is responsible to write it in at the
1604 tracepoint address. */
1607 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1608 CORE_ADDR collector
,
1611 CORE_ADDR
*jump_entry
,
1612 CORE_ADDR
*trampoline
,
1613 ULONGEST
*trampoline_size
,
1614 unsigned char *jjump_pad_insn
,
1615 ULONGEST
*jjump_pad_insn_size
,
1616 CORE_ADDR
*adjusted_insn_addr
,
1617 CORE_ADDR
*adjusted_insn_addr_end
,
1620 unsigned char buf
[40];
1624 CORE_ADDR buildaddr
= *jump_entry
;
1626 /* Build the jump pad. */
1628 /* First, do tracepoint data collection. Save registers. */
1630 /* Need to ensure stack pointer saved first. */
1631 buf
[i
++] = 0x54; /* push %rsp */
1632 buf
[i
++] = 0x55; /* push %rbp */
1633 buf
[i
++] = 0x57; /* push %rdi */
1634 buf
[i
++] = 0x56; /* push %rsi */
1635 buf
[i
++] = 0x52; /* push %rdx */
1636 buf
[i
++] = 0x51; /* push %rcx */
1637 buf
[i
++] = 0x53; /* push %rbx */
1638 buf
[i
++] = 0x50; /* push %rax */
1639 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1640 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1641 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1642 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1643 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1644 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1645 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1646 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1647 buf
[i
++] = 0x9c; /* pushfq */
1648 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1650 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1651 i
+= sizeof (unsigned long);
1652 buf
[i
++] = 0x57; /* push %rdi */
1653 append_insns (&buildaddr
, i
, buf
);
1655 /* Stack space for the collecting_t object. */
1657 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1658 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1659 memcpy (buf
+ i
, &tpoint
, 8);
1661 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1662 i
+= push_opcode (&buf
[i
],
1663 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1664 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1665 append_insns (&buildaddr
, i
, buf
);
1669 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1670 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1672 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1673 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1674 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1675 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1676 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1677 append_insns (&buildaddr
, i
, buf
);
1679 /* Set up the gdb_collect call. */
1680 /* At this point, (stack pointer + 0x18) is the base of our saved
1684 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1685 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1687 /* tpoint address may be 64-bit wide. */
1688 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1689 memcpy (buf
+ i
, &tpoint
, 8);
1691 append_insns (&buildaddr
, i
, buf
);
1693 /* The collector function being in the shared library, may be
1694 >31-bits away off the jump pad. */
1696 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1697 memcpy (buf
+ i
, &collector
, 8);
1699 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1700 append_insns (&buildaddr
, i
, buf
);
1702 /* Clear the spin-lock. */
1704 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1705 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1706 memcpy (buf
+ i
, &lockaddr
, 8);
1708 append_insns (&buildaddr
, i
, buf
);
1710 /* Remove stack that had been used for the collect_t object. */
1712 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1713 append_insns (&buildaddr
, i
, buf
);
1715 /* Restore register state. */
1717 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1721 buf
[i
++] = 0x9d; /* popfq */
1722 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1723 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1724 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1725 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1726 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1727 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1728 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1729 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1730 buf
[i
++] = 0x58; /* pop %rax */
1731 buf
[i
++] = 0x5b; /* pop %rbx */
1732 buf
[i
++] = 0x59; /* pop %rcx */
1733 buf
[i
++] = 0x5a; /* pop %rdx */
1734 buf
[i
++] = 0x5e; /* pop %rsi */
1735 buf
[i
++] = 0x5f; /* pop %rdi */
1736 buf
[i
++] = 0x5d; /* pop %rbp */
1737 buf
[i
++] = 0x5c; /* pop %rsp */
1738 append_insns (&buildaddr
, i
, buf
);
1740 /* Now, adjust the original instruction to execute in the jump
1742 *adjusted_insn_addr
= buildaddr
;
1743 relocate_instruction (&buildaddr
, tpaddr
);
1744 *adjusted_insn_addr_end
= buildaddr
;
1746 /* Finally, write a jump back to the program. */
1748 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1749 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1752 "E.Jump back from jump pad too far from tracepoint "
1753 "(offset 0x%" PRIx64
" > int32).", loffset
);
1757 offset
= (int) loffset
;
1758 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1759 memcpy (buf
+ 1, &offset
, 4);
1760 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1762 /* The jump pad is now built. Wire in a jump to our jump pad. This
1763 is always done last (by our caller actually), so that we can
1764 install fast tracepoints with threads running. This relies on
1765 the agent's atomic write support. */
1766 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1767 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1770 "E.Jump pad too far from tracepoint "
1771 "(offset 0x%" PRIx64
" > int32).", loffset
);
1775 offset
= (int) loffset
;
1777 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1778 memcpy (buf
+ 1, &offset
, 4);
1779 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1780 *jjump_pad_insn_size
= sizeof (jump_insn
);
1782 /* Return the end address of our pad. */
1783 *jump_entry
= buildaddr
;
1788 #endif /* __x86_64__ */
1790 /* Build a jump pad that saves registers and calls a collection
1791 function. Writes a jump instruction to the jump pad to
1792 JJUMPAD_INSN. The caller is responsible to write it in at the
1793 tracepoint address. */
1796 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1797 CORE_ADDR collector
,
1800 CORE_ADDR
*jump_entry
,
1801 CORE_ADDR
*trampoline
,
1802 ULONGEST
*trampoline_size
,
1803 unsigned char *jjump_pad_insn
,
1804 ULONGEST
*jjump_pad_insn_size
,
1805 CORE_ADDR
*adjusted_insn_addr
,
1806 CORE_ADDR
*adjusted_insn_addr_end
,
1809 unsigned char buf
[0x100];
1811 CORE_ADDR buildaddr
= *jump_entry
;
1813 /* Build the jump pad. */
1815 /* First, do tracepoint data collection. Save registers. */
1817 buf
[i
++] = 0x60; /* pushad */
1818 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1819 *((int *)(buf
+ i
)) = (int) tpaddr
;
1821 buf
[i
++] = 0x9c; /* pushf */
1822 buf
[i
++] = 0x1e; /* push %ds */
1823 buf
[i
++] = 0x06; /* push %es */
1824 buf
[i
++] = 0x0f; /* push %fs */
1826 buf
[i
++] = 0x0f; /* push %gs */
1828 buf
[i
++] = 0x16; /* push %ss */
1829 buf
[i
++] = 0x0e; /* push %cs */
1830 append_insns (&buildaddr
, i
, buf
);
1832 /* Stack space for the collecting_t object. */
1834 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1836 /* Build the object. */
1837 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1838 memcpy (buf
+ i
, &tpoint
, 4);
1840 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1842 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1843 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1844 append_insns (&buildaddr
, i
, buf
);
1846 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1847 If we cared for it, this could be using xchg alternatively. */
1850 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1851 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1853 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1855 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1856 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1857 append_insns (&buildaddr
, i
, buf
);
1860 /* Set up arguments to the gdb_collect call. */
1862 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1863 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1864 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1865 append_insns (&buildaddr
, i
, buf
);
1868 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1869 append_insns (&buildaddr
, i
, buf
);
1872 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1873 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1875 append_insns (&buildaddr
, i
, buf
);
1877 buf
[0] = 0xe8; /* call <reladdr> */
1878 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1879 memcpy (buf
+ 1, &offset
, 4);
1880 append_insns (&buildaddr
, 5, buf
);
1881 /* Clean up after the call. */
1882 buf
[0] = 0x83; /* add $0x8,%esp */
1885 append_insns (&buildaddr
, 3, buf
);
1888 /* Clear the spin-lock. This would need the LOCK prefix on older
1891 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1892 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1893 memcpy (buf
+ i
, &lockaddr
, 4);
1895 append_insns (&buildaddr
, i
, buf
);
1898 /* Remove stack that had been used for the collect_t object. */
1900 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1901 append_insns (&buildaddr
, i
, buf
);
1904 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1907 buf
[i
++] = 0x17; /* pop %ss */
1908 buf
[i
++] = 0x0f; /* pop %gs */
1910 buf
[i
++] = 0x0f; /* pop %fs */
1912 buf
[i
++] = 0x07; /* pop %es */
1913 buf
[i
++] = 0x1f; /* pop %ds */
1914 buf
[i
++] = 0x9d; /* popf */
1915 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1918 buf
[i
++] = 0x61; /* popad */
1919 append_insns (&buildaddr
, i
, buf
);
1921 /* Now, adjust the original instruction to execute in the jump
1923 *adjusted_insn_addr
= buildaddr
;
1924 relocate_instruction (&buildaddr
, tpaddr
);
1925 *adjusted_insn_addr_end
= buildaddr
;
1927 /* Write the jump back to the program. */
1928 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1929 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1930 memcpy (buf
+ 1, &offset
, 4);
1931 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1933 /* The jump pad is now built. Wire in a jump to our jump pad. This
1934 is always done last (by our caller actually), so that we can
1935 install fast tracepoints with threads running. This relies on
1936 the agent's atomic write support. */
1939 /* Create a trampoline. */
1940 *trampoline_size
= sizeof (jump_insn
);
1941 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1943 /* No trampoline space available. */
1945 "E.Cannot allocate trampoline space needed for fast "
1946 "tracepoints on 4-byte instructions.");
1950 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1951 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1952 memcpy (buf
+ 1, &offset
, 4);
1953 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1955 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1956 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1957 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1958 memcpy (buf
+ 2, &offset
, 2);
1959 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1960 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1964 /* Else use a 32-bit relative jump instruction. */
1965 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1966 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1967 memcpy (buf
+ 1, &offset
, 4);
1968 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1969 *jjump_pad_insn_size
= sizeof (jump_insn
);
1972 /* Return the end address of our pad. */
1973 *jump_entry
= buildaddr
;
1979 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1980 CORE_ADDR collector
,
1983 CORE_ADDR
*jump_entry
,
1984 CORE_ADDR
*trampoline
,
1985 ULONGEST
*trampoline_size
,
1986 unsigned char *jjump_pad_insn
,
1987 ULONGEST
*jjump_pad_insn_size
,
1988 CORE_ADDR
*adjusted_insn_addr
,
1989 CORE_ADDR
*adjusted_insn_addr_end
,
1993 if (is_64bit_tdesc ())
1994 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1995 collector
, lockaddr
,
1996 orig_size
, jump_entry
,
1997 trampoline
, trampoline_size
,
1999 jjump_pad_insn_size
,
2001 adjusted_insn_addr_end
,
2005 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2006 collector
, lockaddr
,
2007 orig_size
, jump_entry
,
2008 trampoline
, trampoline_size
,
2010 jjump_pad_insn_size
,
2012 adjusted_insn_addr_end
,
2016 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2020 x86_get_min_fast_tracepoint_insn_len (void)
2022 static int warned_about_fast_tracepoints
= 0;
2025 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2026 used for fast tracepoints. */
2027 if (is_64bit_tdesc ())
2031 if (agent_loaded_p ())
2033 char errbuf
[IPA_BUFSIZ
];
2037 /* On x86, if trampolines are available, then 4-byte jump instructions
2038 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2039 with a 4-byte offset are used instead. */
2040 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2044 /* GDB has no channel to explain to user why a shorter fast
2045 tracepoint is not possible, but at least make GDBserver
2046 mention that something has gone awry. */
2047 if (!warned_about_fast_tracepoints
)
2049 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2050 warned_about_fast_tracepoints
= 1;
2057 /* Indicate that the minimum length is currently unknown since the IPA
2058 has not loaded yet. */
2064 add_insns (unsigned char *start
, int len
)
2066 CORE_ADDR buildaddr
= current_insn_ptr
;
2069 debug_printf ("Adding %d bytes of insn at %s\n",
2070 len
, paddress (buildaddr
));
2072 append_insns (&buildaddr
, len
, start
);
2073 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  The start_/end_ labels
   bracket the asm so add_insns can copy exactly those bytes into the
   inferior.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant: switch the assembler to .code32 around the insns so
   the same source emits i386 code from a 64-bit gdbserver.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2117 amd64_emit_prologue (void)
2119 EMIT_ASM (amd64_prologue
,
2121 "movq %rsp,%rbp\n\t"
2122 "sub $0x20,%rsp\n\t"
2123 "movq %rdi,-8(%rbp)\n\t"
2124 "movq %rsi,-16(%rbp)");
2129 amd64_emit_epilogue (void)
2131 EMIT_ASM (amd64_epilogue
,
2132 "movq -16(%rbp),%rdi\n\t"
2133 "movq %rax,(%rdi)\n\t"
2140 amd64_emit_add (void)
2142 EMIT_ASM (amd64_add
,
2143 "add (%rsp),%rax\n\t"
2144 "lea 0x8(%rsp),%rsp");
2148 amd64_emit_sub (void)
2150 EMIT_ASM (amd64_sub
,
2151 "sub %rax,(%rsp)\n\t"
2156 amd64_emit_mul (void)
2162 amd64_emit_lsh (void)
2168 amd64_emit_rsh_signed (void)
2174 amd64_emit_rsh_unsigned (void)
2180 amd64_emit_ext (int arg
)
2185 EMIT_ASM (amd64_ext_8
,
2191 EMIT_ASM (amd64_ext_16
,
2196 EMIT_ASM (amd64_ext_32
,
2205 amd64_emit_log_not (void)
2207 EMIT_ASM (amd64_log_not
,
2208 "test %rax,%rax\n\t"
2214 amd64_emit_bit_and (void)
2216 EMIT_ASM (amd64_and
,
2217 "and (%rsp),%rax\n\t"
2218 "lea 0x8(%rsp),%rsp");
2222 amd64_emit_bit_or (void)
2225 "or (%rsp),%rax\n\t"
2226 "lea 0x8(%rsp),%rsp");
2230 amd64_emit_bit_xor (void)
2232 EMIT_ASM (amd64_xor
,
2233 "xor (%rsp),%rax\n\t"
2234 "lea 0x8(%rsp),%rsp");
2238 amd64_emit_bit_not (void)
2240 EMIT_ASM (amd64_bit_not
,
2241 "xorq $0xffffffffffffffff,%rax");
2245 amd64_emit_equal (void)
2247 EMIT_ASM (amd64_equal
,
2248 "cmp %rax,(%rsp)\n\t"
2249 "je .Lamd64_equal_true\n\t"
2251 "jmp .Lamd64_equal_end\n\t"
2252 ".Lamd64_equal_true:\n\t"
2254 ".Lamd64_equal_end:\n\t"
2255 "lea 0x8(%rsp),%rsp");
2259 amd64_emit_less_signed (void)
2261 EMIT_ASM (amd64_less_signed
,
2262 "cmp %rax,(%rsp)\n\t"
2263 "jl .Lamd64_less_signed_true\n\t"
2265 "jmp .Lamd64_less_signed_end\n\t"
2266 ".Lamd64_less_signed_true:\n\t"
2268 ".Lamd64_less_signed_end:\n\t"
2269 "lea 0x8(%rsp),%rsp");
2273 amd64_emit_less_unsigned (void)
2275 EMIT_ASM (amd64_less_unsigned
,
2276 "cmp %rax,(%rsp)\n\t"
2277 "jb .Lamd64_less_unsigned_true\n\t"
2279 "jmp .Lamd64_less_unsigned_end\n\t"
2280 ".Lamd64_less_unsigned_true:\n\t"
2282 ".Lamd64_less_unsigned_end:\n\t"
2283 "lea 0x8(%rsp),%rsp");
2287 amd64_emit_ref (int size
)
2292 EMIT_ASM (amd64_ref1
,
2296 EMIT_ASM (amd64_ref2
,
2300 EMIT_ASM (amd64_ref4
,
2301 "movl (%rax),%eax");
2304 EMIT_ASM (amd64_ref8
,
2305 "movq (%rax),%rax");
2311 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2313 EMIT_ASM (amd64_if_goto
,
2317 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2325 amd64_emit_goto (int *offset_p
, int *size_p
)
2327 EMIT_ASM (amd64_goto
,
2328 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2336 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2338 int diff
= (to
- (from
+ size
));
2339 unsigned char buf
[sizeof (int)];
2347 memcpy (buf
, &diff
, sizeof (int));
2348 write_inferior_memory (from
, buf
, sizeof (int));
2352 amd64_emit_const (LONGEST num
)
2354 unsigned char buf
[16];
2356 CORE_ADDR buildaddr
= current_insn_ptr
;
2359 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2360 memcpy (&buf
[i
], &num
, sizeof (num
));
2362 append_insns (&buildaddr
, i
, buf
);
2363 current_insn_ptr
= buildaddr
;
2367 amd64_emit_call (CORE_ADDR fn
)
2369 unsigned char buf
[16];
2371 CORE_ADDR buildaddr
;
2374 /* The destination function being in the shared library, may be
2375 >31-bits away off the compiled code pad. */
2377 buildaddr
= current_insn_ptr
;
2379 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2383 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2385 /* Offset is too large for a call. Use callq, but that requires
2386 a register, so avoid it if possible. Use r10, since it is
2387 call-clobbered, we don't have to push/pop it. */
2388 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2390 memcpy (buf
+ i
, &fn
, 8);
2392 buf
[i
++] = 0xff; /* callq *%r10 */
2397 int offset32
= offset64
; /* we know we can't overflow here. */
2398 memcpy (buf
+ i
, &offset32
, 4);
2402 append_insns (&buildaddr
, i
, buf
);
2403 current_insn_ptr
= buildaddr
;
2407 amd64_emit_reg (int reg
)
2409 unsigned char buf
[16];
2411 CORE_ADDR buildaddr
;
2413 /* Assume raw_regs is still in %rdi. */
2414 buildaddr
= current_insn_ptr
;
2416 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2417 memcpy (&buf
[i
], ®
, sizeof (reg
));
2419 append_insns (&buildaddr
, i
, buf
);
2420 current_insn_ptr
= buildaddr
;
2421 amd64_emit_call (get_raw_reg_func_addr ());
2425 amd64_emit_pop (void)
2427 EMIT_ASM (amd64_pop
,
2432 amd64_emit_stack_flush (void)
2434 EMIT_ASM (amd64_stack_flush
,
2439 amd64_emit_zero_ext (int arg
)
2444 EMIT_ASM (amd64_zero_ext_8
,
2448 EMIT_ASM (amd64_zero_ext_16
,
2449 "and $0xffff,%rax");
2452 EMIT_ASM (amd64_zero_ext_32
,
2453 "mov $0xffffffff,%rcx\n\t"
2462 amd64_emit_swap (void)
2464 EMIT_ASM (amd64_swap
,
2471 amd64_emit_stack_adjust (int n
)
2473 unsigned char buf
[16];
2475 CORE_ADDR buildaddr
= current_insn_ptr
;
2478 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2482 /* This only handles adjustments up to 16, but we don't expect any more. */
2484 append_insns (&buildaddr
, i
, buf
);
2485 current_insn_ptr
= buildaddr
;
2488 /* FN's prototype is `LONGEST(*fn)(int)'. */
2491 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2493 unsigned char buf
[16];
2495 CORE_ADDR buildaddr
;
2497 buildaddr
= current_insn_ptr
;
2499 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2500 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2502 append_insns (&buildaddr
, i
, buf
);
2503 current_insn_ptr
= buildaddr
;
2504 amd64_emit_call (fn
);
2507 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2510 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2512 unsigned char buf
[16];
2514 CORE_ADDR buildaddr
;
2516 buildaddr
= current_insn_ptr
;
2518 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2519 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2521 append_insns (&buildaddr
, i
, buf
);
2522 current_insn_ptr
= buildaddr
;
2523 EMIT_ASM (amd64_void_call_2_a
,
2524 /* Save away a copy of the stack top. */
2526 /* Also pass top as the second argument. */
2528 amd64_emit_call (fn
);
2529 EMIT_ASM (amd64_void_call_2_b
,
2530 /* Restore the stack top, %rax may have been trashed. */
2535 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2538 "cmp %rax,(%rsp)\n\t"
2539 "jne .Lamd64_eq_fallthru\n\t"
2540 "lea 0x8(%rsp),%rsp\n\t"
2542 /* jmp, but don't trust the assembler to choose the right jump */
2543 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2544 ".Lamd64_eq_fallthru:\n\t"
2545 "lea 0x8(%rsp),%rsp\n\t"
2555 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2558 "cmp %rax,(%rsp)\n\t"
2559 "je .Lamd64_ne_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_ne_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2575 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2578 "cmp %rax,(%rsp)\n\t"
2579 "jnl .Lamd64_lt_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_lt_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2595 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnle .Lamd64_le_fallthru\n\t"
2600 "lea 0x8(%rsp),%rsp\n\t"
2602 /* jmp, but don't trust the assembler to choose the right jump */
2603 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2604 ".Lamd64_le_fallthru:\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2615 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2618 "cmp %rax,(%rsp)\n\t"
2619 "jng .Lamd64_gt_fallthru\n\t"
2620 "lea 0x8(%rsp),%rsp\n\t"
2622 /* jmp, but don't trust the assembler to choose the right jump */
2623 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2624 ".Lamd64_gt_fallthru:\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2635 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2638 "cmp %rax,(%rsp)\n\t"
2639 "jnge .Lamd64_ge_fallthru\n\t"
2640 ".Lamd64_ge_jump:\n\t"
2641 "lea 0x8(%rsp),%rsp\n\t"
2643 /* jmp, but don't trust the assembler to choose the right jump */
2644 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2645 ".Lamd64_ge_fallthru:\n\t"
2646 "lea 0x8(%rsp),%rsp\n\t"
2655 struct emit_ops amd64_emit_ops
=
2657 amd64_emit_prologue
,
2658 amd64_emit_epilogue
,
2663 amd64_emit_rsh_signed
,
2664 amd64_emit_rsh_unsigned
,
2672 amd64_emit_less_signed
,
2673 amd64_emit_less_unsigned
,
2677 amd64_write_goto_address
,
2682 amd64_emit_stack_flush
,
2683 amd64_emit_zero_ext
,
2685 amd64_emit_stack_adjust
,
2686 amd64_emit_int_call_1
,
2687 amd64_emit_void_call_2
,
2696 #endif /* __x86_64__ */
2699 i386_emit_prologue (void)
2701 EMIT_ASM32 (i386_prologue
,
2705 /* At this point, the raw regs base address is at 8(%ebp), and the
2706 value pointer is at 12(%ebp). */
2710 i386_emit_epilogue (void)
2712 EMIT_ASM32 (i386_epilogue
,
2713 "mov 12(%ebp),%ecx\n\t"
2714 "mov %eax,(%ecx)\n\t"
2715 "mov %ebx,0x4(%ecx)\n\t"
2723 i386_emit_add (void)
2725 EMIT_ASM32 (i386_add
,
2726 "add (%esp),%eax\n\t"
2727 "adc 0x4(%esp),%ebx\n\t"
2728 "lea 0x8(%esp),%esp");
2732 i386_emit_sub (void)
2734 EMIT_ASM32 (i386_sub
,
2735 "subl %eax,(%esp)\n\t"
2736 "sbbl %ebx,4(%esp)\n\t"
2742 i386_emit_mul (void)
2748 i386_emit_lsh (void)
2754 i386_emit_rsh_signed (void)
2760 i386_emit_rsh_unsigned (void)
2766 i386_emit_ext (int arg
)
2771 EMIT_ASM32 (i386_ext_8
,
2774 "movl %eax,%ebx\n\t"
2778 EMIT_ASM32 (i386_ext_16
,
2780 "movl %eax,%ebx\n\t"
2784 EMIT_ASM32 (i386_ext_32
,
2785 "movl %eax,%ebx\n\t"
2794 i386_emit_log_not (void)
2796 EMIT_ASM32 (i386_log_not
,
2798 "test %eax,%eax\n\t"
2805 i386_emit_bit_and (void)
2807 EMIT_ASM32 (i386_and
,
2808 "and (%esp),%eax\n\t"
2809 "and 0x4(%esp),%ebx\n\t"
2810 "lea 0x8(%esp),%esp");
2814 i386_emit_bit_or (void)
2816 EMIT_ASM32 (i386_or
,
2817 "or (%esp),%eax\n\t"
2818 "or 0x4(%esp),%ebx\n\t"
2819 "lea 0x8(%esp),%esp");
2823 i386_emit_bit_xor (void)
2825 EMIT_ASM32 (i386_xor
,
2826 "xor (%esp),%eax\n\t"
2827 "xor 0x4(%esp),%ebx\n\t"
2828 "lea 0x8(%esp),%esp");
2832 i386_emit_bit_not (void)
2834 EMIT_ASM32 (i386_bit_not
,
2835 "xor $0xffffffff,%eax\n\t"
2836 "xor $0xffffffff,%ebx\n\t");
2840 i386_emit_equal (void)
2842 EMIT_ASM32 (i386_equal
,
2843 "cmpl %ebx,4(%esp)\n\t"
2844 "jne .Li386_equal_false\n\t"
2845 "cmpl %eax,(%esp)\n\t"
2846 "je .Li386_equal_true\n\t"
2847 ".Li386_equal_false:\n\t"
2849 "jmp .Li386_equal_end\n\t"
2850 ".Li386_equal_true:\n\t"
2852 ".Li386_equal_end:\n\t"
2854 "lea 0x8(%esp),%esp");
2858 i386_emit_less_signed (void)
2860 EMIT_ASM32 (i386_less_signed
,
2861 "cmpl %ebx,4(%esp)\n\t"
2862 "jl .Li386_less_signed_true\n\t"
2863 "jne .Li386_less_signed_false\n\t"
2864 "cmpl %eax,(%esp)\n\t"
2865 "jl .Li386_less_signed_true\n\t"
2866 ".Li386_less_signed_false:\n\t"
2868 "jmp .Li386_less_signed_end\n\t"
2869 ".Li386_less_signed_true:\n\t"
2871 ".Li386_less_signed_end:\n\t"
2873 "lea 0x8(%esp),%esp");
2877 i386_emit_less_unsigned (void)
2879 EMIT_ASM32 (i386_less_unsigned
,
2880 "cmpl %ebx,4(%esp)\n\t"
2881 "jb .Li386_less_unsigned_true\n\t"
2882 "jne .Li386_less_unsigned_false\n\t"
2883 "cmpl %eax,(%esp)\n\t"
2884 "jb .Li386_less_unsigned_true\n\t"
2885 ".Li386_less_unsigned_false:\n\t"
2887 "jmp .Li386_less_unsigned_end\n\t"
2888 ".Li386_less_unsigned_true:\n\t"
2890 ".Li386_less_unsigned_end:\n\t"
2892 "lea 0x8(%esp),%esp");
2896 i386_emit_ref (int size
)
2901 EMIT_ASM32 (i386_ref1
,
2905 EMIT_ASM32 (i386_ref2
,
2909 EMIT_ASM32 (i386_ref4
,
2910 "movl (%eax),%eax");
2913 EMIT_ASM32 (i386_ref8
,
2914 "movl 4(%eax),%ebx\n\t"
2915 "movl (%eax),%eax");
2921 i386_emit_if_goto (int *offset_p
, int *size_p
)
2923 EMIT_ASM32 (i386_if_goto
,
2929 /* Don't trust the assembler to choose the right jump */
2930 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2933 *offset_p
= 11; /* be sure that this matches the sequence above */
2939 i386_emit_goto (int *offset_p
, int *size_p
)
2941 EMIT_ASM32 (i386_goto
,
2942 /* Don't trust the assembler to choose the right jump */
2943 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2951 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2953 int diff
= (to
- (from
+ size
));
2954 unsigned char buf
[sizeof (int)];
2956 /* We're only doing 4-byte sizes at the moment. */
2963 memcpy (buf
, &diff
, sizeof (int));
2964 write_inferior_memory (from
, buf
, sizeof (int));
2968 i386_emit_const (LONGEST num
)
2970 unsigned char buf
[16];
2972 CORE_ADDR buildaddr
= current_insn_ptr
;
2975 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2976 lo
= num
& 0xffffffff;
2977 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2979 hi
= ((num
>> 32) & 0xffffffff);
2982 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2983 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2988 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2990 append_insns (&buildaddr
, i
, buf
);
2991 current_insn_ptr
= buildaddr
;
2995 i386_emit_call (CORE_ADDR fn
)
2997 unsigned char buf
[16];
2999 CORE_ADDR buildaddr
;
3001 buildaddr
= current_insn_ptr
;
3003 buf
[i
++] = 0xe8; /* call <reladdr> */
3004 offset
= ((int) fn
) - (buildaddr
+ 5);
3005 memcpy (buf
+ 1, &offset
, 4);
3006 append_insns (&buildaddr
, 5, buf
);
3007 current_insn_ptr
= buildaddr
;
3011 i386_emit_reg (int reg
)
3013 unsigned char buf
[16];
3015 CORE_ADDR buildaddr
;
3017 EMIT_ASM32 (i386_reg_a
,
3019 buildaddr
= current_insn_ptr
;
3021 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3022 memcpy (&buf
[i
], ®
, sizeof (reg
));
3024 append_insns (&buildaddr
, i
, buf
);
3025 current_insn_ptr
= buildaddr
;
3026 EMIT_ASM32 (i386_reg_b
,
3027 "mov %eax,4(%esp)\n\t"
3028 "mov 8(%ebp),%eax\n\t"
3030 i386_emit_call (get_raw_reg_func_addr ());
3031 EMIT_ASM32 (i386_reg_c
,
3033 "lea 0x8(%esp),%esp");
3037 i386_emit_pop (void)
3039 EMIT_ASM32 (i386_pop
,
3045 i386_emit_stack_flush (void)
3047 EMIT_ASM32 (i386_stack_flush
,
3053 i386_emit_zero_ext (int arg
)
3058 EMIT_ASM32 (i386_zero_ext_8
,
3059 "and $0xff,%eax\n\t"
3063 EMIT_ASM32 (i386_zero_ext_16
,
3064 "and $0xffff,%eax\n\t"
3068 EMIT_ASM32 (i386_zero_ext_32
,
3077 i386_emit_swap (void)
3079 EMIT_ASM32 (i386_swap
,
3089 i386_emit_stack_adjust (int n
)
3091 unsigned char buf
[16];
3093 CORE_ADDR buildaddr
= current_insn_ptr
;
3096 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3100 append_insns (&buildaddr
, i
, buf
);
3101 current_insn_ptr
= buildaddr
;
3104 /* FN's prototype is `LONGEST(*fn)(int)'. */
3107 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3109 unsigned char buf
[16];
3111 CORE_ADDR buildaddr
;
3113 EMIT_ASM32 (i386_int_call_1_a
,
3114 /* Reserve a bit of stack space. */
3116 /* Put the one argument on the stack. */
3117 buildaddr
= current_insn_ptr
;
3119 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3122 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3124 append_insns (&buildaddr
, i
, buf
);
3125 current_insn_ptr
= buildaddr
;
3126 i386_emit_call (fn
);
3127 EMIT_ASM32 (i386_int_call_1_c
,
3129 "lea 0x8(%esp),%esp");
3132 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
/* NOTE(review): garbled fragment of i386_emit_void_call_2 (FN, ARG1).
   Mirrors i386_emit_int_call_1 but passes two arguments: the prologue
   asm saves %eax, reserves 0x10 bytes, and stores the %eax/%ebx
   top-of-stack pair as the 64-bit second argument; then a
   hand-assembled "movl $<arg1>,(%esp)" places the first argument
   (ModRM/SIB bytes and `i += 4` are missing here, as is `int i;`),
   FN is called, and the epilogue asm releases the 0x10 bytes and
   restores the saved stack top.  Restore the missing lines from
   upstream before compiling.  */
3135 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3137 unsigned char buf
[16];
3139 CORE_ADDR buildaddr
;
3141 EMIT_ASM32 (i386_void_call_2_a
,
3142 /* Preserve %eax only; we don't have to worry about %ebx. */
3144 /* Reserve a bit of stack space for arguments. */
3145 "sub $0x10,%esp\n\t"
3146 /* Copy "top" to the second argument position. (Note that
3147 we can't assume function won't scribble on its
3148 arguments, so don't try to restore from this.) */
3149 "mov %eax,4(%esp)\n\t"
3150 "mov %ebx,8(%esp)");
3151 /* Put the first argument on the stack. */
3152 buildaddr
= current_insn_ptr
;
3154 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3157 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3159 append_insns (&buildaddr
, i
, buf
);
3160 current_insn_ptr
= buildaddr
;
3161 i386_emit_call (fn
);
3162 EMIT_ASM32 (i386_void_call_2_b
,
3163 "lea 0x10(%esp),%esp\n\t"
3164 /* Restore original stack top. */
/* NOTE(review): garbled fragment of i386_emit_eq_goto.  Emits a 64-bit
   equality-compare-and-branch: the two cmpl's test the low half
   (%eax vs (%esp)) and high half (%ebx vs 4(%esp)) of the value on
   the target stack; on equality a raw 0xe9 rel32 jmp is emitted with
   a zero displacement to be patched later (presumably by
   i386_write_goto_address via OFFSET_P/SIZE_P — confirm against
   upstream).  The register pops after each "lea", and the trailing
   `*offset_p`/`*size_p` assignments, are missing from this copy.  */
3170 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3173 /* Check low half first, more likely to be decider */
3174 "cmpl %eax,(%esp)\n\t"
3175 "jne .Leq_fallthru\n\t"
3176 "cmpl %ebx,4(%esp)\n\t"
3177 "jne .Leq_fallthru\n\t"
3178 "lea 0x8(%esp),%esp\n\t"
3181 /* jmp, but don't trust the assembler to choose the right jump */
3182 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3183 ".Leq_fallthru:\n\t"
3184 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): garbled fragment of i386_emit_ne_goto (64-bit
   not-equal compare-and-branch; structure parallels i386_emit_eq_goto).
   A conditional-jump line appears to have been lost between the two
   cmpl's (original line 3200) — as shown, the low-half compare's
   result is never tested.  Restore from upstream; the pops and the
   `*offset_p`/`*size_p` epilogue are missing here as well.  */
3195 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3198 /* Check low half first, more likely to be decider */
3199 "cmpl %eax,(%esp)\n\t"
3201 "cmpl %ebx,4(%esp)\n\t"
3202 "je .Lne_fallthru\n\t"
3204 "lea 0x8(%esp),%esp\n\t"
3207 /* jmp, but don't trust the assembler to choose the right jump */
3208 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3209 ".Lne_fallthru:\n\t"
3210 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): garbled fragment of i386_emit_lt_goto (signed 64-bit
   less-than compare-and-branch).  High halves are compared first
   (cmpl %ebx,4(%esp)); only on high-half equality do the low halves
   decide ("jnl" = jump if not less, signed).  A "jl" taken-branch line
   after the first cmpl appears to be missing (it would jump straight
   to the emitted 0xe9 jmp), as are the pops and the
   `*offset_p`/`*size_p` epilogue — restore from upstream.  */
3221 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3224 "cmpl %ebx,4(%esp)\n\t"
3226 "jne .Llt_fallthru\n\t"
3227 "cmpl %eax,(%esp)\n\t"
3228 "jnl .Llt_fallthru\n\t"
3230 "lea 0x8(%esp),%esp\n\t"
3233 /* jmp, but don't trust the assembler to choose the right jump */
3234 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3235 ".Llt_fallthru:\n\t"
3236 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): garbled fragment of i386_emit_le_goto (signed 64-bit
   less-or-equal compare-and-branch; same skeleton as
   i386_emit_lt_goto, with "jnle" deciding the low half).  Missing
   from this copy: the taken-branch jump after the high-half cmpl,
   the register pops, and the `*offset_p`/`*size_p` epilogue —
   restore from upstream.  */
3247 i386_emit_le_goto (int *offset_p
, int *size_p
)
3250 "cmpl %ebx,4(%esp)\n\t"
3252 "jne .Lle_fallthru\n\t"
3253 "cmpl %eax,(%esp)\n\t"
3254 "jnle .Lle_fallthru\n\t"
3256 "lea 0x8(%esp),%esp\n\t"
3259 /* jmp, but don't trust the assembler to choose the right jump */
3260 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3261 ".Lle_fallthru:\n\t"
3262 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): garbled fragment of i386_emit_gt_goto (signed 64-bit
   greater-than compare-and-branch; "jng" decides the low half).  Same
   losses as the sibling emitters: the taken-branch jump after the
   high-half cmpl, the register pops, and the `*offset_p`/`*size_p`
   epilogue are missing — restore from upstream.  */
3273 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3276 "cmpl %ebx,4(%esp)\n\t"
3278 "jne .Lgt_fallthru\n\t"
3279 "cmpl %eax,(%esp)\n\t"
3280 "jng .Lgt_fallthru\n\t"
3282 "lea 0x8(%esp),%esp\n\t"
3285 /* jmp, but don't trust the assembler to choose the right jump */
3286 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3287 ".Lgt_fallthru:\n\t"
3288 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): garbled fragment of i386_emit_ge_goto (signed 64-bit
   greater-or-equal compare-and-branch; "jnge" decides the low half).
   Same losses as the sibling emitters: the taken-branch jump after
   the high-half cmpl, the register pops, and the
   `*offset_p`/`*size_p` epilogue are missing — restore from
   upstream.  */
3299 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3302 "cmpl %ebx,4(%esp)\n\t"
3304 "jne .Lge_fallthru\n\t"
3305 "cmpl %eax,(%esp)\n\t"
3306 "jnge .Lge_fallthru\n\t"
3308 "lea 0x8(%esp),%esp\n\t"
3311 /* jmp, but don't trust the assembler to choose the right jump */
3312 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3313 ".Lge_fallthru:\n\t"
3314 "lea 0x8(%esp),%esp\n\t"
/* NOTE(review): garbled fragment of the i386 emit_ops vtable — the
   function-pointer table the agent-expression compiler dispatches
   through on 32-bit targets (x86_emit_ops below returns its address).
   Only scattered entries survive in this copy; the majority of the
   initializer (prologue/epilogue, arithmetic, comparison, memory and
   goto emitters, plus the closing brace) is missing.  Entry order must
   match struct emit_ops exactly — restore from upstream, do not guess. */
3324 struct emit_ops i386_emit_ops
=
3332 i386_emit_rsh_signed
,
3333 i386_emit_rsh_unsigned
,
3341 i386_emit_less_signed
,
3342 i386_emit_less_unsigned
,
3346 i386_write_goto_address
,
3351 i386_emit_stack_flush
,
3354 i386_emit_stack_adjust
,
3355 i386_emit_int_call_1
,
3356 i386_emit_void_call_2
,
/* NOTE(review): garbled fragment of x86_emit_ops.  Selects the JIT
   vtable for the current inferior: amd64_emit_ops when the target
   description is 64-bit (is_64bit_tdesc), i386_emit_ops otherwise.
   The function name line, braces, and — presumably — an
   `#ifdef __x86_64__` guard around the 64-bit branch are missing
   from this copy (confirm against upstream).  */
3366 static struct emit_ops
*
3370 if (is_64bit_tdesc ())
3371 return &amd64_emit_ops
;
3374 return &i386_emit_ops
;
/* NOTE(review): only the header line of x86_supports_range_stepping
   survives; its body (presumably a trivial constant return reporting
   range-stepping support to the linux_target_ops table below) is
   missing — restore from upstream.  */
3378 x86_supports_range_stepping (void)
3383 /* This is initialized assuming an amd64 target.
3384 x86_arch_setup will correct it for i386 or amd64 targets. */
/* NOTE(review): garbled fragment of the_low_target, the
   linux_target_ops vtable through which the generic gdbserver Linux
   backend calls into this x86 port.  Many initializer entries are
   missing from this copy (arch_setup, get_pc/set_pc, breakpoint
   fields, point insertion, siginfo fixup, emit_ops, the closing
   brace, ...).  Entry order must match struct linux_target_ops
   exactly — restore from upstream, do not guess.  */
3386 struct linux_target_ops the_low_target
=
3389 x86_linux_regs_info
,
3390 x86_cannot_fetch_register
,
3391 x86_cannot_store_register
,
3392 NULL
, /* fetch_register */
3402 x86_stopped_by_watchpoint
,
3403 x86_stopped_data_address
,
3404 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3405 native i386 case (no registers smaller than an xfer unit), and are not
3406 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3409 /* need to fix up i386 siginfo if host is amd64 */
3411 x86_linux_new_process
,
3412 x86_linux_new_thread
,
3413 x86_linux_prepare_to_resume
,
3414 x86_linux_process_qsupported
,
3415 x86_supports_tracepoints
,
3416 x86_get_thread_area
,
3417 x86_install_fast_tracepoint_jump_pad
,
3419 x86_get_min_fast_tracepoint_insn_len
,
3420 x86_supports_range_stepping
,
/* NOTE(review): garbled fragment of initialize_low_arch, the per-arch
   startup hook.  Registers every amd64/x32/i386 target description
   (the init_registers_* functions declared in HEAD as coming from
   auto-generated files), then builds the two "no XML" fallback
   descriptions by xmalloc'ing a target_desc, copying the full
   description into it, and pointing its xmltarget at the no-xml
   stub; finally registers the x86 regsets.  Missing from this copy:
   the `void` return-type line, braces, `int i;`-style locals if any,
   and — presumably — the `#ifdef __x86_64__` guard around the
   amd64/x32 section (confirm against upstream).  The function's
   closing brace lies past the end of this chunk.  Note the xmalloc'd
   target_desc objects live for the life of the process (never
   freed) — that matches the visible usage; confirm upstream intent.  */
3424 initialize_low_arch (void)
3426 /* Initialize the Linux target descriptions. */
3428 init_registers_amd64_linux ();
3429 init_registers_amd64_avx_linux ();
3430 init_registers_amd64_avx512_linux ();
3431 init_registers_amd64_mpx_linux ();
3433 init_registers_x32_linux ();
3434 init_registers_x32_avx_linux ();
3435 init_registers_x32_avx512_linux ();
3437 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3438 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3439 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3441 init_registers_i386_linux ();
3442 init_registers_i386_mmx_linux ();
3443 init_registers_i386_avx_linux ();
3444 init_registers_i386_avx512_linux ();
3445 init_registers_i386_mpx_linux ();
3447 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3448 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3449 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3451 initialize_regsets_info (&x86_regsets_info
);