1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-avx512-linux.c. */
52 void init_registers_amd64_avx512_linux (void);
53 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
55 /* Defined in auto-generated file amd64-mpx-linux.c. */
56 void init_registers_amd64_mpx_linux (void);
57 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
59 /* Defined in auto-generated file x32-linux.c. */
60 void init_registers_x32_linux (void);
61 extern const struct target_desc
*tdesc_x32_linux
;
63 /* Defined in auto-generated file x32-avx-linux.c. */
64 void init_registers_x32_avx_linux (void);
65 extern const struct target_desc
*tdesc_x32_avx_linux
;
67 /* Defined in auto-generated file x32-avx512-linux.c. */
68 void init_registers_x32_avx512_linux (void);
69 extern const struct target_desc
*tdesc_x32_avx512_linux
;
73 /* Defined in auto-generated file i386-linux.c. */
74 void init_registers_i386_linux (void);
75 extern const struct target_desc
*tdesc_i386_linux
;
77 /* Defined in auto-generated file i386-mmx-linux.c. */
78 void init_registers_i386_mmx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mmx_linux
;
81 /* Defined in auto-generated file i386-avx-linux.c. */
82 void init_registers_i386_avx_linux (void);
83 extern const struct target_desc
*tdesc_i386_avx_linux
;
85 /* Defined in auto-generated file i386-avx512-linux.c. */
86 void init_registers_i386_avx512_linux (void);
87 extern const struct target_desc
*tdesc_i386_avx512_linux
;
89 /* Defined in auto-generated file i386-mpx-linux.c. */
90 void init_registers_i386_mpx_linux (void);
91 extern const struct target_desc
*tdesc_i386_mpx_linux
;
94 static struct target_desc
*tdesc_amd64_linux_no_xml
;
96 static struct target_desc
*tdesc_i386_linux_no_xml
;
99 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
100 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
102 /* Backward compatibility for gdb without XML support. */
104 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
105 <architecture>i386</architecture>\
106 <osabi>GNU/Linux</osabi>\
110 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
111 <architecture>i386:x86-64</architecture>\
112 <osabi>GNU/Linux</osabi>\
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
121 #ifndef PTRACE_GETREGSET
122 #define PTRACE_GETREGSET 0x4204
125 #ifndef PTRACE_SETREGSET
126 #define PTRACE_SETREGSET 0x4205
130 #ifndef PTRACE_GET_THREAD_AREA
131 #define PTRACE_GET_THREAD_AREA 25
134 /* This definition comes from prctl.h, but some kernels may not have it. */
135 #ifndef PTRACE_ARCH_PRCTL
136 #define PTRACE_ARCH_PRCTL 30
139 /* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
142 #define ARCH_SET_GS 0x1001
143 #define ARCH_SET_FS 0x1002
144 #define ARCH_GET_FS 0x1003
145 #define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct i386_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
181 static const int x86_64_regmap
[] =
183 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
184 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
185 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
186 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
187 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
188 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
208 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
209 #define X86_64_USER_REGS (GS + 1)
211 #else /* ! __x86_64__ */
213 /* Mapping between the general-purpose registers in `struct user'
214 format and GDB's register array layout. */
215 static /*const*/ int i386_regmap
[] =
217 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
218 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
219 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
220 DS
* 4, ES
* 4, FS
* 4, GS
* 4
223 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
229 /* Returns true if the current inferior belongs to a x86-64 process,
/* NOTE(review): this file's text is extraction-damaged throughout — interior
   lines are missing and statements are split across lines.  Comments describe
   only what the visible fragments show.  */
233 is_64bit_tdesc (void)
/* Decide bitness from the current thread's target description: register 0
   being 8 bytes wide means a 64-bit tdesc.  The second argument to
   get_thread_regcache is 0 — presumably "do not fetch registers"; confirm
   against gdbserver's regcache API.  */
235 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
237 return register_size (regcache
->tdesc
, 0) == 8;
243 /* Called by libthread_db. */
/* Retrieve a thread's thread-area (TLS) base for libthread_db.  For 64-bit
   inferiors the FS/GS segment base is read via PTRACE_ARCH_PRCTL; the 32-bit
   fallback reads a GDT descriptor via PTRACE_GET_THREAD_AREA.
   NOTE(review): the switch on IDX, braces, and return statements are missing
   from this extraction — confirm control flow against the original file.  */
246 ps_get_thread_area (const struct ps_prochandle
*ph
,
247 lwpid_t lwpid
, int idx
, void **base
)
250 int use_64bit
= is_64bit_tdesc ();
/* 64-bit path: ask the kernel for the FS or GS base directly.  */
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
261 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
/* 32-bit path: fetch the 4-word descriptor for GDT slot IDX; the base
   address is taken from desc[1] below.  */
272 unsigned int desc
[4];
274 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
275 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
278 /* Ensure we properly extend the value to 64-bits for x86_64. */
279 *base
= (void *) (uintptr_t) desc
[1];
284 /* Get the thread area address. This is used to recognize which
285 thread is which when tracing with the in-process agent library. We
286 don't read anything from the address, and treat it as opaque; it's
287 the address itself that we assume is unique per-thread. */
/* NOTE(review): extraction-damaged — the #ifdef/if structure, braces and
   return statements are missing.  Visible logic: 64-bit path reads the FS
   base via PTRACE_ARCH_PRCTL; 32-bit path derives a GDT index from the GS
   selector and reads the descriptor via PTRACE_GET_THREAD_AREA.  */
290 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
293 int use_64bit
= is_64bit_tdesc ();
298 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
300 *addr
= (CORE_ADDR
) (uintptr_t) base
;
/* 32-bit path: locate the thread's regcache so the GS selector can be
   read from the register cache (second argument 1 — presumably "fetch
   registers"; confirm).  */
309 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
310 struct thread_info
*thr
= get_lwp_thread (lwp
);
311 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
312 unsigned int desc
[4];
314 const int reg_thread_area
= 3; /* bits to scale down register value. */
317 collect_register_by_name (regcache
, "gs", &gs
);
/* Segment selector >> 3 gives the GDT index.  */
319 idx
= gs
>> reg_thread_area
;
321 if (ptrace (PTRACE_GET_THREAD_AREA
,
323 (void *) (long) idx
, (unsigned long) &desc
) < 0)
/* Return non-zero if REGNO cannot be written via the usrregs interface.
   NOTE(review): the body of the is_64bit_tdesc branch is missing from this
   extraction (presumably "return 0" — on 64-bit all regs go through
   regsets); the visible fallback rejects regs beyond the i386 map.  */
334 x86_cannot_store_register (int regno
)
337 if (is_64bit_tdesc ())
341 return regno
>= I386_NUM_REGS
;
/* Return non-zero if REGNO cannot be read via the usrregs interface.
   Mirrors x86_cannot_store_register; same extraction damage applies
   (the 64-bit branch body is missing).  */
345 x86_cannot_fetch_register (int regno
)
348 if (is_64bit_tdesc ())
352 return regno
>= I386_NUM_REGS
;
/* Copy general-purpose registers from REGCACHE into the ptrace GETREGS
   buffer BUF, honoring the regmap offsets.  NOTE(review): branch braces
   and the #ifdef __x86_64__ guard are missing from this extraction.  */
356 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
/* 64-bit tdesc: use the x86_64 regmap; -1 entries have no slot in BUF.  */
361 if (register_size (regcache
->tdesc
, 0) == 8)
363 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
364 if (x86_64_regmap
[i
] != -1)
365 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
369 /* 32-bit inferior registers need to be zero-extended.
370 Callers would read uninitialized memory otherwise. */
371 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
/* 32-bit tdesc: copy via the i386 regmap, plus orig_eax which has no
   regmap slot of its own.  */
374 for (i
= 0; i
< I386_NUM_REGS
; i
++)
375 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
377 collect_register_by_name (regcache
, "orig_eax",
378 ((char *) buf
) + ORIG_EAX
* 4);
/* Inverse of x86_fill_gregset: copy general-purpose registers from the
   ptrace GETREGS buffer BUF back into REGCACHE.  Same extraction damage:
   braces and #ifdef guards are missing.  */
382 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
387 if (register_size (regcache
->tdesc
, 0) == 8)
389 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
390 if (x86_64_regmap
[i
] != -1)
391 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
396 for (i
= 0; i
< I386_NUM_REGS
; i
++)
397 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
399 supply_register_by_name (regcache
, "orig_eax",
400 ((char *) buf
) + ORIG_EAX
* 4);
/* Copy FP registers from REGCACHE into the ptrace FP buffer BUF.
   NOTE(review): the two calls below are presumably alternatives selected
   by a missing #ifdef/else (fxsave vs. legacy fsave layout) — confirm
   against the original file.  */
404 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
407 i387_cache_to_fxsave (regcache
, buf
);
409 i387_cache_to_fsave (regcache
, buf
);
/* Inverse of x86_fill_fpregset: supply FP registers from BUF to REGCACHE.
   Same presumed missing #ifdef/else selecting fxsave vs. fsave layout.  */
414 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
417 i387_fxsave_to_cache (regcache
, buf
);
419 i387_fsave_to_cache (regcache
, buf
);
/* FPX (fxsave-layout) regset transfer: cache -> BUF.  */
426 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
428 i387_cache_to_fxsave (regcache
, buf
);
/* FPX regset transfer: BUF -> cache.  */
432 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
434 i387_fxsave_to_cache (regcache
, buf
);
/* XSAVE extended-state regset transfer: cache -> BUF.  */
440 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
442 i387_cache_to_xsave (regcache
, buf
);
/* XSAVE extended-state regset transfer: BUF -> cache.  */
446 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
448 i387_xsave_to_cache (regcache
, buf
);
451 /* ??? The non-biarch i386 case stores all the i387 regs twice.
452 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
453 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
454 doesn't work. IWBN to avoid the duplication in the case where it
455 does work. Maybe the arch_setup routine could check whether it works
456 and update the supported regsets accordingly. */
458 static struct regset_info x86_regsets
[] =
460 #ifdef HAVE_PTRACE_GETREGS
461 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
463 x86_fill_gregset
, x86_store_gregset
},
464 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
465 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
467 # ifdef HAVE_PTRACE_GETFPXREGS
468 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
470 x86_fill_fpxregset
, x86_store_fpxregset
},
473 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
475 x86_fill_fpregset
, x86_store_fpregset
},
476 #endif /* HAVE_PTRACE_GETREGS */
477 { 0, 0, 0, -1, -1, NULL
, NULL
}
/* Read the program counter from REGCACHE: "rip" for a 64-bit tdesc,
   "eip" otherwise.  NOTE(review): the if/else structure and the local pc
   declarations (presumably uint64_t / uint32_t) are missing from this
   extraction.  */
481 x86_get_pc (struct regcache
*regcache
)
483 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
488 collect_register_by_name (regcache
, "rip", &pc
);
489 return (CORE_ADDR
) pc
;
494 collect_register_by_name (regcache
, "eip", &pc
);
495 return (CORE_ADDR
) pc
;
/* Write PC into REGCACHE: "rip" (as unsigned long) for a 64-bit tdesc,
   "eip" (as unsigned int, truncating) otherwise.  NOTE(review): the
   if/else structure is missing from this extraction.  */
500 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
502 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
506 unsigned long newpc
= pc
;
507 supply_register_by_name (regcache
, "rip", &newpc
);
511 unsigned int newpc
= pc
;
512 supply_register_by_name (regcache
, "eip", &newpc
);
516 static const unsigned char x86_breakpoint
[] = { 0xCC };
517 #define x86_breakpoint_len 1
/* Return whether a breakpoint instruction sits at PC.  Reads one byte of
   inferior memory into a local (declaration missing from this extraction)
   — presumably compared against the 0xCC int3 opcode in x86_breakpoint;
   the comparison/return is also missing here.  */
520 x86_breakpoint_at (CORE_ADDR pc
)
524 (*the_target
->read_memory
) (pc
, &c
, 1);
531 /* Support for debug registers. */
/* Read debug register REGNUM of the LWP identified by PTID via
   PTRACE_PEEKUSER at the u_debugreg offset in struct user.  NOTE(review):
   the errno reset/check surrounding the ptrace call and the return of
   VALUE are missing from this extraction — PEEKUSER results must be
   validated through errno, not the return value alone.  */
534 x86_linux_dr_get (ptid_t ptid
, int regnum
)
539 tid
= ptid_get_lwp (ptid
);
542 value
= ptrace (PTRACE_PEEKUSER
, tid
,
543 offsetof (struct user
, u_debugreg
[regnum
]), 0);
545 error ("Couldn't read debug register");
/* Write VALUE into debug register REGNUM of the LWP identified by PTID
   via PTRACE_POKEUSER.  NOTE(review): the errno reset/check around the
   ptrace call is missing from this extraction; the error () call below
   is presumably conditional on it.  */
551 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
555 tid
= ptid_get_lwp (ptid
);
558 ptrace (PTRACE_POKEUSER
, tid
,
559 offsetof (struct user
, u_debugreg
[regnum
]), value
);
561 error ("Couldn't write debug register");
/* find_inferior callback: for every thread of the process identified by
   *PID_P, mark its debug registers as stale so they are rewritten just
   before the LWP is next resumed, and (conditionally — the guard is
   missing from this extraction, presumably "if not already stopped")
   pause running LWPs so the update can take effect.  */
565 update_debug_registers_callback (struct inferior_list_entry
*entry
,
568 struct thread_info
*thr
= (struct thread_info
*) entry
;
569 struct lwp_info
*lwp
= get_thread_lwp (thr
);
570 int pid
= *(int *) pid_p
;
572 /* Only update the threads of this process. */
573 if (pid_of (thr
) == pid
)
575 /* The actual update is done later just before resuming the lwp,
576 we just mark that the registers need updating. */
577 lwp
->arch_private
->debug_registers_changed
= 1;
579 /* If the lwp isn't stopped, force it to momentarily pause, so
580 we can update its debug registers. */
582 linux_stop_lwp (lwp
);
588 /* Update the inferior's debug register REGNUM from STATE. */
/* Validates REGNUM is an address register (DR0..DR3), then marks every
   thread of the current process for a deferred debug-register update.
   The actual write happens in x86_linux_prepare_to_resume.  */
591 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
593 /* Only update the threads of this process. */
594 int pid
= pid_of (current_inferior
);
596 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
597 fatal ("Invalid debug register %d", regnum
);
599 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
602 /* Return the inferior's debug register REGNUM. */
/* Asserts REGNUM is an address register, then reads it from the current
   thread via ptrace.  */
605 i386_dr_low_get_addr (int regnum
)
607 ptid_t ptid
= ptid_of (current_inferior
)
;
609 /* DR6 and DR7 are retrieved with some other way. */
610 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
612 return x86_linux_dr_get (ptid
, regnum
);
615 /* Update the inferior's DR7 debug control register from STATE. */
/* Like i386_dr_low_set_addr: defers the DR7 write by marking all threads
   of the current process; prepare_to_resume performs it.  */
618 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
620 /* Only update the threads of this process. */
621 int pid
= pid_of (current_inferior
);
623 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
626 /* Return the inferior's DR7 debug control register. */
629 i386_dr_low_get_control (void)
631 ptid_t ptid
= ptid_of (current_inferior
);
633 return x86_linux_dr_get (ptid
, DR_CONTROL
);
636 /* Get the value of the DR6 debug status register from the inferior
637 and record it in STATE. */
/* NOTE(review): despite the comment mentioning STATE, the visible
   signature takes no argument and simply returns DR6's value.  */
640 i386_dr_low_get_status (void)
642 ptid_t ptid
= ptid_of (current_inferior
);
644 return x86_linux_dr_get (ptid
, DR_STATUS
);
647 /* Breakpoint/Watchpoint support. */
650 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
652 struct process_info
*proc
= current_process ();
655 case '0': /* software-breakpoint */
659 ret
= prepare_to_access_memory ();
662 ret
= set_gdb_breakpoint_at (addr
);
663 done_accessing_memory ();
666 case '1': /* hardware-breakpoint */
667 case '2': /* write watchpoint */
668 case '3': /* read watchpoint */
669 case '4': /* access watchpoint */
671 enum target_hw_bp_type hw_type
= Z_packet_to_hw_type (type
);
672 struct i386_debug_reg_state
*state
673 = &proc
->private->arch_private
->debug_reg_state
;
675 return i386_low_insert_watchpoint (state
, hw_type
, addr
, len
);
685 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
687 struct process_info
*proc
= current_process ();
690 case '0': /* software-breakpoint */
694 ret
= prepare_to_access_memory ();
697 ret
= delete_gdb_breakpoint_at (addr
);
698 done_accessing_memory ();
701 case '1': /* hardware-breakpoint */
702 case '2': /* write watchpoint */
703 case '3': /* read watchpoint */
704 case '4': /* access watchpoint */
706 enum target_hw_bp_type hw_type
= Z_packet_to_hw_type (type
);
707 struct i386_debug_reg_state
*state
708 = &proc
->private->arch_private
->debug_reg_state
;
710 return i386_low_remove_watchpoint (state
, hw_type
, addr
, len
);
719 x86_stopped_by_watchpoint (void)
721 struct process_info
*proc
= current_process ();
722 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
726 x86_stopped_data_address (void)
728 struct process_info
*proc
= current_process ();
730 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
736 /* Called when a new process is created. */
/* Allocate (zeroed) per-process arch data and initialize its debug
   register mirror.  Ownership passes to the caller; the return statement
   is missing from this extraction.  */
738 static struct arch_process_info
*
739 x86_linux_new_process (void)
741 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
743 i386_low_init_dregs (&info
->debug_reg_state
);
748 /* Called when a new thread is detected. */
/* Allocate (zeroed) per-thread arch data.  debug_registers_changed starts
   at 1 so the thread's debug registers get written on first resume.  The
   return statement is missing from this extraction.  */
750 static struct arch_lwp_info
*
751 x86_linux_new_thread (void)
753 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
755 info
->debug_registers_changed
= 1;
760 /* Called when resuming a thread.
761 If the debug regs have changed, update the thread's copies. */
/* Flush the process's mirrored debug-register state into the LWP about to
   resume: write each referenced address register DR0..DR3, then DR7, clear
   the stale flag, and zero DR6 when a watchpoint was written or previously
   hit.  NOTE(review): braces and the statement setting clear_status inside
   the loop are missing from this extraction — confirm against the
   original file.  */
764 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
766 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
767 int clear_status
= 0;
769 if (lwp
->arch_private
->debug_registers_changed
)
772 int pid
= ptid_get_pid (ptid
);
773 struct process_info
*proc
= find_process_pid (pid
);
774 struct i386_debug_reg_state
*state
775 = &proc
->private->arch_private
->debug_reg_state
;
/* Only touch address registers that some watchpoint actually uses.  */
777 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
778 if (state
->dr_ref_count
[i
] > 0)
780 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
782 /* If we're setting a watchpoint, any change the inferior
783 had done itself to the debug registers needs to be
784 discarded, otherwise, i386_low_stopped_data_address can
789 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
791 lwp
->arch_private
->debug_registers_changed
= 0;
794 if (clear_status
|| lwp
->stopped_by_watchpoint
)
795 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
798 /* When GDBSERVER is built as a 64-bit application on linux, the
799 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
800 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
801 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
802 conversion in-place ourselves. */
804 /* These types below (compat_*) define a siginfo type that is layout
805 compatible with the siginfo type exported by the 32-bit userspace
810 typedef int compat_int_t
;
811 typedef unsigned int compat_uptr_t
;
813 typedef int compat_time_t
;
814 typedef int compat_timer_t
;
815 typedef int compat_clock_t
;
817 struct compat_timeval
819 compat_time_t tv_sec
;
823 typedef union compat_sigval
825 compat_int_t sival_int
;
826 compat_uptr_t sival_ptr
;
829 typedef struct compat_siginfo
837 int _pad
[((128 / sizeof (int)) - 3)];
846 /* POSIX.1b timers */
851 compat_sigval_t _sigval
;
854 /* POSIX.1b signals */
859 compat_sigval_t _sigval
;
868 compat_clock_t _utime
;
869 compat_clock_t _stime
;
872 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
887 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
888 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
890 typedef struct compat_x32_siginfo
898 int _pad
[((128 / sizeof (int)) - 3)];
907 /* POSIX.1b timers */
912 compat_sigval_t _sigval
;
915 /* POSIX.1b signals */
920 compat_sigval_t _sigval
;
929 compat_x32_clock_t _utime
;
930 compat_x32_clock_t _stime
;
933 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
946 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
948 #define cpt_si_pid _sifields._kill._pid
949 #define cpt_si_uid _sifields._kill._uid
950 #define cpt_si_timerid _sifields._timer._tid
951 #define cpt_si_overrun _sifields._timer._overrun
952 #define cpt_si_status _sifields._sigchld._status
953 #define cpt_si_utime _sifields._sigchld._utime
954 #define cpt_si_stime _sifields._sigchld._stime
955 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
956 #define cpt_si_addr _sifields._sigfault._addr
957 #define cpt_si_band _sifields._sigpoll._band
958 #define cpt_si_fd _sifields._sigpoll._fd
960 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
961 In their place is si_timer1,si_timer2. */
963 #define si_timerid si_timer1
966 #define si_overrun si_timer2
970 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
972 memset (to
, 0, sizeof (*to
));
974 to
->si_signo
= from
->si_signo
;
975 to
->si_errno
= from
->si_errno
;
976 to
->si_code
= from
->si_code
;
978 if (to
->si_code
== SI_TIMER
)
980 to
->cpt_si_timerid
= from
->si_timerid
;
981 to
->cpt_si_overrun
= from
->si_overrun
;
982 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
984 else if (to
->si_code
== SI_USER
)
986 to
->cpt_si_pid
= from
->si_pid
;
987 to
->cpt_si_uid
= from
->si_uid
;
989 else if (to
->si_code
< 0)
991 to
->cpt_si_pid
= from
->si_pid
;
992 to
->cpt_si_uid
= from
->si_uid
;
993 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
997 switch (to
->si_signo
)
1000 to
->cpt_si_pid
= from
->si_pid
;
1001 to
->cpt_si_uid
= from
->si_uid
;
1002 to
->cpt_si_status
= from
->si_status
;
1003 to
->cpt_si_utime
= from
->si_utime
;
1004 to
->cpt_si_stime
= from
->si_stime
;
1010 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1013 to
->cpt_si_band
= from
->si_band
;
1014 to
->cpt_si_fd
= from
->si_fd
;
1017 to
->cpt_si_pid
= from
->si_pid
;
1018 to
->cpt_si_uid
= from
->si_uid
;
1019 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1026 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1028 memset (to
, 0, sizeof (*to
));
1030 to
->si_signo
= from
->si_signo
;
1031 to
->si_errno
= from
->si_errno
;
1032 to
->si_code
= from
->si_code
;
1034 if (to
->si_code
== SI_TIMER
)
1036 to
->si_timerid
= from
->cpt_si_timerid
;
1037 to
->si_overrun
= from
->cpt_si_overrun
;
1038 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1040 else if (to
->si_code
== SI_USER
)
1042 to
->si_pid
= from
->cpt_si_pid
;
1043 to
->si_uid
= from
->cpt_si_uid
;
1045 else if (to
->si_code
< 0)
1047 to
->si_pid
= from
->cpt_si_pid
;
1048 to
->si_uid
= from
->cpt_si_uid
;
1049 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1053 switch (to
->si_signo
)
1056 to
->si_pid
= from
->cpt_si_pid
;
1057 to
->si_uid
= from
->cpt_si_uid
;
1058 to
->si_status
= from
->cpt_si_status
;
1059 to
->si_utime
= from
->cpt_si_utime
;
1060 to
->si_stime
= from
->cpt_si_stime
;
1066 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1069 to
->si_band
= from
->cpt_si_band
;
1070 to
->si_fd
= from
->cpt_si_fd
;
1073 to
->si_pid
= from
->cpt_si_pid
;
1074 to
->si_uid
= from
->cpt_si_uid
;
1075 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1082 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1085 memset (to
, 0, sizeof (*to
));
1087 to
->si_signo
= from
->si_signo
;
1088 to
->si_errno
= from
->si_errno
;
1089 to
->si_code
= from
->si_code
;
1091 if (to
->si_code
== SI_TIMER
)
1093 to
->cpt_si_timerid
= from
->si_timerid
;
1094 to
->cpt_si_overrun
= from
->si_overrun
;
1095 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1097 else if (to
->si_code
== SI_USER
)
1099 to
->cpt_si_pid
= from
->si_pid
;
1100 to
->cpt_si_uid
= from
->si_uid
;
1102 else if (to
->si_code
< 0)
1104 to
->cpt_si_pid
= from
->si_pid
;
1105 to
->cpt_si_uid
= from
->si_uid
;
1106 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1110 switch (to
->si_signo
)
1113 to
->cpt_si_pid
= from
->si_pid
;
1114 to
->cpt_si_uid
= from
->si_uid
;
1115 to
->cpt_si_status
= from
->si_status
;
1116 to
->cpt_si_utime
= from
->si_utime
;
1117 to
->cpt_si_stime
= from
->si_stime
;
1123 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1126 to
->cpt_si_band
= from
->si_band
;
1127 to
->cpt_si_fd
= from
->si_fd
;
1130 to
->cpt_si_pid
= from
->si_pid
;
1131 to
->cpt_si_uid
= from
->si_uid
;
1132 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1139 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1140 compat_x32_siginfo_t
*from
)
1142 memset (to
, 0, sizeof (*to
));
1144 to
->si_signo
= from
->si_signo
;
1145 to
->si_errno
= from
->si_errno
;
1146 to
->si_code
= from
->si_code
;
1148 if (to
->si_code
== SI_TIMER
)
1150 to
->si_timerid
= from
->cpt_si_timerid
;
1151 to
->si_overrun
= from
->cpt_si_overrun
;
1152 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1154 else if (to
->si_code
== SI_USER
)
1156 to
->si_pid
= from
->cpt_si_pid
;
1157 to
->si_uid
= from
->cpt_si_uid
;
1159 else if (to
->si_code
< 0)
1161 to
->si_pid
= from
->cpt_si_pid
;
1162 to
->si_uid
= from
->cpt_si_uid
;
1163 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1167 switch (to
->si_signo
)
1170 to
->si_pid
= from
->cpt_si_pid
;
1171 to
->si_uid
= from
->cpt_si_uid
;
1172 to
->si_status
= from
->cpt_si_status
;
1173 to
->si_utime
= from
->cpt_si_utime
;
1174 to
->si_stime
= from
->cpt_si_stime
;
1180 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1183 to
->si_band
= from
->cpt_si_band
;
1184 to
->si_fd
= from
->cpt_si_fd
;
1187 to
->si_pid
= from
->cpt_si_pid
;
1188 to
->si_uid
= from
->cpt_si_uid
;
1189 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1195 #endif /* __x86_64__ */
1197 /* Convert a native/host siginfo object, into/from the siginfo in the
1198 layout of the inferiors' architecture. Returns true if any
1199 conversion was done; false otherwise. If DIRECTION is 1, then copy
1200 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1204 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1207 unsigned int machine
;
1208 int tid
= lwpid_of (current_inferior
);
1209 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1211 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1212 if (!is_64bit_tdesc ())
1214 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1215 fatal ("unexpected difference in siginfo");
1218 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1220 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1224 /* No fixup for native x32 GDB. */
1225 else if (!is_elf64
&& sizeof (void *) == 8)
1227 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1228 fatal ("unexpected difference in siginfo");
1231 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1234 siginfo_from_compat_x32_siginfo (native
,
1235 (struct compat_x32_siginfo
*) inf
);
1246 /* Format of XSAVE extended state is:
1249 fxsave_bytes[0..463]
1250 sw_usable_bytes[464..511]
1251 xstate_hdr_bytes[512..575]
1256 Same memory layout will be used for the coredump NT_X86_XSTATE
1257 representing the XSAVE extended state registers.
1259 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1260 extended state mask, which is the same as the extended control register
1261 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1262 together with the mask saved in the xstate_hdr_bytes to determine what
1263 states the processor/OS supports and what state, used or initialized,
1264 the process/thread is in. */
1265 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1267 /* Does the current host support the GETFPXREGS request? The header
1268 file may or may not define it, and even if it is defined, the
1269 kernel will return EIO if it's running on a pre-SSE processor. */
1270 int have_ptrace_getfpxregs
=
1271 #ifdef HAVE_PTRACE_GETFPXREGS
1278 /* Does the current host support PTRACE_GETREGSET? */
1279 static int have_ptrace_getregset
= -1;
1281 /* Get Linux/x86 target description from running target. */
1283 static const struct target_desc
*
1284 x86_linux_read_description (void)
1286 unsigned int machine
;
1290 static uint64_t xcr0
;
1291 struct regset_info
*regset
;
1293 tid
= lwpid_of (current_inferior
);
1295 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1297 if (sizeof (void *) == 4)
1300 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1302 else if (machine
== EM_X86_64
)
1303 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1307 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1308 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1310 elf_fpxregset_t fpxregs
;
1312 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1314 have_ptrace_getfpxregs
= 0;
1315 have_ptrace_getregset
= 0;
1316 return tdesc_i386_mmx_linux
;
1319 have_ptrace_getfpxregs
= 1;
1325 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1327 /* Don't use XML. */
1329 if (machine
== EM_X86_64
)
1330 return tdesc_amd64_linux_no_xml
;
1333 return tdesc_i386_linux_no_xml
;
1336 if (have_ptrace_getregset
== -1)
1338 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1341 iov
.iov_base
= xstateregs
;
1342 iov
.iov_len
= sizeof (xstateregs
);
1344 /* Check if PTRACE_GETREGSET works. */
1345 if (ptrace (PTRACE_GETREGSET
, tid
,
1346 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1347 have_ptrace_getregset
= 0;
1350 have_ptrace_getregset
= 1;
1352 /* Get XCR0 from XSAVE extended state. */
1353 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1354 / sizeof (uint64_t))];
1356 /* Use PTRACE_GETREGSET if it is available. */
1357 for (regset
= x86_regsets
;
1358 regset
->fill_function
!= NULL
; regset
++)
1359 if (regset
->get_request
== PTRACE_GETREGSET
)
1360 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1361 else if (regset
->type
!= GENERAL_REGS
)
1366 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1367 xcr0_features
= (have_ptrace_getregset
1368 && (xcr0
& I386_XSTATE_ALL_MASK
));
1373 if (machine
== EM_X86_64
)
1380 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1382 case I386_XSTATE_AVX512_MASK
:
1383 return tdesc_amd64_avx512_linux
;
1385 case I386_XSTATE_MPX_MASK
:
1386 return tdesc_amd64_mpx_linux
;
1388 case I386_XSTATE_AVX_MASK
:
1389 return tdesc_amd64_avx_linux
;
1392 return tdesc_amd64_linux
;
1396 return tdesc_amd64_linux
;
1402 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1404 case I386_XSTATE_AVX512_MASK
:
1405 return tdesc_x32_avx512_linux
;
1407 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1408 case I386_XSTATE_AVX_MASK
:
1409 return tdesc_x32_avx_linux
;
1412 return tdesc_x32_linux
;
1416 return tdesc_x32_linux
;
1424 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1426 case (I386_XSTATE_AVX512_MASK
):
1427 return tdesc_i386_avx512_linux
;
1429 case (I386_XSTATE_MPX_MASK
):
1430 return tdesc_i386_mpx_linux
;
1432 case (I386_XSTATE_AVX_MASK
):
1433 return tdesc_i386_avx_linux
;
1436 return tdesc_i386_linux
;
1440 return tdesc_i386_linux
;
1443 gdb_assert_not_reached ("failed to return tdesc");
1446 /* Callback for find_inferior. Stops iteration when a thread with a
1447 given PID is found. */
1450 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1452 int pid
= *(int *) data
;
1454 return (ptid_get_pid (entry
->id
) == pid
);
1457 /* Callback for for_each_inferior. Calls the arch_setup routine for
1461 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1463 int pid
= ptid_get_pid (entry
->id
);
1465 /* Look up any thread of this processes. */
1467 = (struct thread_info
*) find_inferior (&all_threads
,
1468 same_process_callback
, &pid
);
1470 the_low_target
.arch_setup ();
1473 /* Update all the target description of all processes; a new GDB
1474 connected, and it may or not support xml target descriptions. */
1477 x86_linux_update_xmltarget (void)
1479 struct thread_info
*save_inferior
= current_inferior
;
1481 /* Before changing the register cache's internal layout, flush the
1482 contents of the current valid caches back to the threads, and
1483 release the current regcache objects. */
1484 regcache_release ();
1486 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1488 current_inferior
= save_inferior
;
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  QUERY is the raw qSupported feature string (may be
   NULL).  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      /* The value is a comma-separated architecture list; look for
	 the "i386" entry.  */
      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  Regset bookkeeping shared by both register
   layouts; num_regsets is computed at initialization time.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };
/* Register access description for 64-bit inferiors: regsets only, no
   usrregs (PTRACE_PEEKUSER) fallback needed.  */

static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
/* PTRACE_PEEKUSER/POKEUSER register map for 32-bit inferiors.  */

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };
/* Register access description for 32-bit inferiors: regsets with a
   usrregs fallback.  */

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
/* Return the register access description matching the bitness of the
   current inferior's target description.  */

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fast tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
/* Write LEN bytes from BUF to the inferior at address *TO, then
   advance *TO past the bytes written.  Used to build up jump pads
   incrementally.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
/* Decode the space-separated sequence of hex byte values in OP
   (e.g. "48 83 ec 18") into BUF.  Decoding stops at the first token
   that is not a hex number.  Returns the number of bytes stored.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  int count = 0;
  char *cursor = op;

  for (;;)
    {
      char *next;
      unsigned long byte = strtoul (cursor, &next, 16);

      /* strtoul leaves NEXT == CURSOR when no digits were consumed;
	 that terminates the scan.  */
      if (next == cursor)
	break;

      buf[count++] = (unsigned char) byte;
      cursor = next;
    }

  return count;
}
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the address of the tracepoint object; TPADDR the address
   the tracepoint is set at; COLLECTOR the in-process agent's collect
   function; LOCKADDR its spin lock.  ORIG_SIZE is the length of the
   instruction being displaced.  On return *JUMP_ENTRY is advanced
   past the pad, *ADJUSTED_INSN_ADDR/_END bracket the relocated
   original instruction, and *JJUMP_PAD_INSN/_SIZE hold the jump the
   caller must install at TPADDR.  Returns 0 on success, 1 with an
   error string in ERR otherwise.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  /* Pass the tracepoint address as the saved $pc value.  */
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* Take the collector's spin lock (busy-wait on cmpxchg).  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */
  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  The saved %rsp copy is skipped with an
     add rather than popped into %rsp.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1793 #endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  32-bit variant; see the amd64 version for the
   meaning of the parameters.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Dispatch jump-pad installation to the 64-bit or 32-bit builder
   depending on the current inferior's target description.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   depending on the used register set, or 0 if it is still unknown.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
/* Append LEN bytes of compiled code from START at the current
   bytecode-compilation insertion point, advancing current_insn_ptr
   past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  The start_/end_ labels
   bracket the assembled bytes so add_insns can copy them out.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant on a 64-bit host: temporarily switch the assembler
   to 32-bit mode around the instructions.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2122 amd64_emit_prologue (void)
2124 EMIT_ASM (amd64_prologue
,
2126 "movq %rsp,%rbp\n\t"
2127 "sub $0x20,%rsp\n\t"
2128 "movq %rdi,-8(%rbp)\n\t"
2129 "movq %rsi,-16(%rbp)");
2134 amd64_emit_epilogue (void)
2136 EMIT_ASM (amd64_epilogue
,
2137 "movq -16(%rbp),%rdi\n\t"
2138 "movq %rax,(%rdi)\n\t"
2145 amd64_emit_add (void)
2147 EMIT_ASM (amd64_add
,
2148 "add (%rsp),%rax\n\t"
2149 "lea 0x8(%rsp),%rsp");
2153 amd64_emit_sub (void)
2155 EMIT_ASM (amd64_sub
,
2156 "sub %rax,(%rsp)\n\t"
2161 amd64_emit_mul (void)
2167 amd64_emit_lsh (void)
2173 amd64_emit_rsh_signed (void)
2179 amd64_emit_rsh_unsigned (void)
2185 amd64_emit_ext (int arg
)
2190 EMIT_ASM (amd64_ext_8
,
2196 EMIT_ASM (amd64_ext_16
,
2201 EMIT_ASM (amd64_ext_32
,
2210 amd64_emit_log_not (void)
2212 EMIT_ASM (amd64_log_not
,
2213 "test %rax,%rax\n\t"
2219 amd64_emit_bit_and (void)
2221 EMIT_ASM (amd64_and
,
2222 "and (%rsp),%rax\n\t"
2223 "lea 0x8(%rsp),%rsp");
2227 amd64_emit_bit_or (void)
2230 "or (%rsp),%rax\n\t"
2231 "lea 0x8(%rsp),%rsp");
2235 amd64_emit_bit_xor (void)
2237 EMIT_ASM (amd64_xor
,
2238 "xor (%rsp),%rax\n\t"
2239 "lea 0x8(%rsp),%rsp");
2243 amd64_emit_bit_not (void)
2245 EMIT_ASM (amd64_bit_not
,
2246 "xorq $0xffffffffffffffff,%rax");
2250 amd64_emit_equal (void)
2252 EMIT_ASM (amd64_equal
,
2253 "cmp %rax,(%rsp)\n\t"
2254 "je .Lamd64_equal_true\n\t"
2256 "jmp .Lamd64_equal_end\n\t"
2257 ".Lamd64_equal_true:\n\t"
2259 ".Lamd64_equal_end:\n\t"
2260 "lea 0x8(%rsp),%rsp");
2264 amd64_emit_less_signed (void)
2266 EMIT_ASM (amd64_less_signed
,
2267 "cmp %rax,(%rsp)\n\t"
2268 "jl .Lamd64_less_signed_true\n\t"
2270 "jmp .Lamd64_less_signed_end\n\t"
2271 ".Lamd64_less_signed_true:\n\t"
2273 ".Lamd64_less_signed_end:\n\t"
2274 "lea 0x8(%rsp),%rsp");
2278 amd64_emit_less_unsigned (void)
2280 EMIT_ASM (amd64_less_unsigned
,
2281 "cmp %rax,(%rsp)\n\t"
2282 "jb .Lamd64_less_unsigned_true\n\t"
2284 "jmp .Lamd64_less_unsigned_end\n\t"
2285 ".Lamd64_less_unsigned_true:\n\t"
2287 ".Lamd64_less_unsigned_end:\n\t"
2288 "lea 0x8(%rsp),%rsp");
2292 amd64_emit_ref (int size
)
2297 EMIT_ASM (amd64_ref1
,
2301 EMIT_ASM (amd64_ref2
,
2305 EMIT_ASM (amd64_ref4
,
2306 "movl (%rax),%eax");
2309 EMIT_ASM (amd64_ref8
,
2310 "movq (%rax),%rax");
2316 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2318 EMIT_ASM (amd64_if_goto
,
2322 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2330 amd64_emit_goto (int *offset_p
, int *size_p
)
2332 EMIT_ASM (amd64_goto
,
2333 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch the 4-byte relative displacement of a previously emitted jump
   at FROM (SIZE is the displacement width) so that it targets TO.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit code to load the 64-bit constant NUM into the top-of-stack
   register %rax (movabs).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit a call to the function at address FN from the current
   compilation point, using a 32-bit relative call when the target is
   in range and an indirect call through %r10 otherwise.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit code to fetch raw register number REG into the top-of-stack
   register, by calling the agent's get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2430 amd64_emit_pop (void)
2432 EMIT_ASM (amd64_pop
,
2437 amd64_emit_stack_flush (void)
2439 EMIT_ASM (amd64_stack_flush
,
2444 amd64_emit_zero_ext (int arg
)
2449 EMIT_ASM (amd64_zero_ext_8
,
2453 EMIT_ASM (amd64_zero_ext_16
,
2454 "and $0xffff,%rax");
2457 EMIT_ASM (amd64_zero_ext_32
,
2458 "mov $0xffffffff,%rcx\n\t"
2467 amd64_emit_swap (void)
2469 EMIT_ASM (amd64_swap
,
2476 amd64_emit_stack_adjust (int n
)
2478 unsigned char buf
[16];
2480 CORE_ADDR buildaddr
= current_insn_ptr
;
2483 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2487 /* This only handles adjustments up to 16, but we don't expect any more. */
2489 append_insns (&buildaddr
, i
, buf
);
2490 current_insn_ptr
= buildaddr
;
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   the immediate argument ARG1 in %edi; the result is left in %rax.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit code to call FN
   with the immediate ARG1 in %edi and the current top-of-stack value
   as the second argument; the top-of-stack value is preserved across
   the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2540 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2543 "cmp %rax,(%rsp)\n\t"
2544 "jne .Lamd64_eq_fallthru\n\t"
2545 "lea 0x8(%rsp),%rsp\n\t"
2547 /* jmp, but don't trust the assembler to choose the right jump */
2548 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2549 ".Lamd64_eq_fallthru:\n\t"
2550 "lea 0x8(%rsp),%rsp\n\t"
2560 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2563 "cmp %rax,(%rsp)\n\t"
2564 "je .Lamd64_ne_fallthru\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2567 /* jmp, but don't trust the assembler to choose the right jump */
2568 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2569 ".Lamd64_ne_fallthru:\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2580 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2583 "cmp %rax,(%rsp)\n\t"
2584 "jnl .Lamd64_lt_fallthru\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2587 /* jmp, but don't trust the assembler to choose the right jump */
2588 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2589 ".Lamd64_lt_fallthru:\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2600 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2603 "cmp %rax,(%rsp)\n\t"
2604 "jnle .Lamd64_le_fallthru\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2607 /* jmp, but don't trust the assembler to choose the right jump */
2608 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2609 ".Lamd64_le_fallthru:\n\t"
2610 "lea 0x8(%rsp),%rsp\n\t"
2620 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2623 "cmp %rax,(%rsp)\n\t"
2624 "jng .Lamd64_gt_fallthru\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2627 /* jmp, but don't trust the assembler to choose the right jump */
2628 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2629 ".Lamd64_gt_fallthru:\n\t"
2630 "lea 0x8(%rsp),%rsp\n\t"
2640 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2643 "cmp %rax,(%rsp)\n\t"
2644 "jnge .Lamd64_ge_fallthru\n\t"
2645 ".Lamd64_ge_jump:\n\t"
2646 "lea 0x8(%rsp),%rsp\n\t"
2648 /* jmp, but don't trust the assembler to choose the right jump */
2649 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2650 ".Lamd64_ge_fallthru:\n\t"
2651 "lea 0x8(%rsp),%rsp\n\t"
2660 struct emit_ops amd64_emit_ops
=
2662 amd64_emit_prologue
,
2663 amd64_emit_epilogue
,
2668 amd64_emit_rsh_signed
,
2669 amd64_emit_rsh_unsigned
,
2677 amd64_emit_less_signed
,
2678 amd64_emit_less_unsigned
,
2682 amd64_write_goto_address
,
2687 amd64_emit_stack_flush
,
2688 amd64_emit_zero_ext
,
2690 amd64_emit_stack_adjust
,
2691 amd64_emit_int_call_1
,
2692 amd64_emit_void_call_2
,
2701 #endif /* __x86_64__ */
2704 i386_emit_prologue (void)
2706 EMIT_ASM32 (i386_prologue
,
2710 /* At this point, the raw regs base address is at 8(%ebp), and the
2711 value pointer is at 12(%ebp). */
2715 i386_emit_epilogue (void)
2717 EMIT_ASM32 (i386_epilogue
,
2718 "mov 12(%ebp),%ecx\n\t"
2719 "mov %eax,(%ecx)\n\t"
2720 "mov %ebx,0x4(%ecx)\n\t"
2728 i386_emit_add (void)
2730 EMIT_ASM32 (i386_add
,
2731 "add (%esp),%eax\n\t"
2732 "adc 0x4(%esp),%ebx\n\t"
2733 "lea 0x8(%esp),%esp");
2737 i386_emit_sub (void)
2739 EMIT_ASM32 (i386_sub
,
2740 "subl %eax,(%esp)\n\t"
2741 "sbbl %ebx,4(%esp)\n\t"
2747 i386_emit_mul (void)
2753 i386_emit_lsh (void)
2759 i386_emit_rsh_signed (void)
2765 i386_emit_rsh_unsigned (void)
2771 i386_emit_ext (int arg
)
2776 EMIT_ASM32 (i386_ext_8
,
2779 "movl %eax,%ebx\n\t"
2783 EMIT_ASM32 (i386_ext_16
,
2785 "movl %eax,%ebx\n\t"
2789 EMIT_ASM32 (i386_ext_32
,
2790 "movl %eax,%ebx\n\t"
2799 i386_emit_log_not (void)
2801 EMIT_ASM32 (i386_log_not
,
2803 "test %eax,%eax\n\t"
2810 i386_emit_bit_and (void)
2812 EMIT_ASM32 (i386_and
,
2813 "and (%esp),%eax\n\t"
2814 "and 0x4(%esp),%ebx\n\t"
2815 "lea 0x8(%esp),%esp");
2819 i386_emit_bit_or (void)
2821 EMIT_ASM32 (i386_or
,
2822 "or (%esp),%eax\n\t"
2823 "or 0x4(%esp),%ebx\n\t"
2824 "lea 0x8(%esp),%esp");
2828 i386_emit_bit_xor (void)
2830 EMIT_ASM32 (i386_xor
,
2831 "xor (%esp),%eax\n\t"
2832 "xor 0x4(%esp),%ebx\n\t"
2833 "lea 0x8(%esp),%esp");
2837 i386_emit_bit_not (void)
2839 EMIT_ASM32 (i386_bit_not
,
2840 "xor $0xffffffff,%eax\n\t"
2841 "xor $0xffffffff,%ebx\n\t");
2845 i386_emit_equal (void)
2847 EMIT_ASM32 (i386_equal
,
2848 "cmpl %ebx,4(%esp)\n\t"
2849 "jne .Li386_equal_false\n\t"
2850 "cmpl %eax,(%esp)\n\t"
2851 "je .Li386_equal_true\n\t"
2852 ".Li386_equal_false:\n\t"
2854 "jmp .Li386_equal_end\n\t"
2855 ".Li386_equal_true:\n\t"
2857 ".Li386_equal_end:\n\t"
2859 "lea 0x8(%esp),%esp");
2863 i386_emit_less_signed (void)
2865 EMIT_ASM32 (i386_less_signed
,
2866 "cmpl %ebx,4(%esp)\n\t"
2867 "jl .Li386_less_signed_true\n\t"
2868 "jne .Li386_less_signed_false\n\t"
2869 "cmpl %eax,(%esp)\n\t"
2870 "jl .Li386_less_signed_true\n\t"
2871 ".Li386_less_signed_false:\n\t"
2873 "jmp .Li386_less_signed_end\n\t"
2874 ".Li386_less_signed_true:\n\t"
2876 ".Li386_less_signed_end:\n\t"
2878 "lea 0x8(%esp),%esp");
2882 i386_emit_less_unsigned (void)
2884 EMIT_ASM32 (i386_less_unsigned
,
2885 "cmpl %ebx,4(%esp)\n\t"
2886 "jb .Li386_less_unsigned_true\n\t"
2887 "jne .Li386_less_unsigned_false\n\t"
2888 "cmpl %eax,(%esp)\n\t"
2889 "jb .Li386_less_unsigned_true\n\t"
2890 ".Li386_less_unsigned_false:\n\t"
2892 "jmp .Li386_less_unsigned_end\n\t"
2893 ".Li386_less_unsigned_true:\n\t"
2895 ".Li386_less_unsigned_end:\n\t"
2897 "lea 0x8(%esp),%esp");
2901 i386_emit_ref (int size
)
2906 EMIT_ASM32 (i386_ref1
,
2910 EMIT_ASM32 (i386_ref2
,
2914 EMIT_ASM32 (i386_ref4
,
2915 "movl (%eax),%eax");
2918 EMIT_ASM32 (i386_ref8
,
2919 "movl 4(%eax),%ebx\n\t"
2920 "movl (%eax),%eax");
2926 i386_emit_if_goto (int *offset_p
, int *size_p
)
2928 EMIT_ASM32 (i386_if_goto
,
2934 /* Don't trust the assembler to choose the right jump */
2935 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2938 *offset_p
= 11; /* be sure that this matches the sequence above */
2944 i386_emit_goto (int *offset_p
, int *size_p
)
2946 EMIT_ASM32 (i386_goto
,
2947 /* Don't trust the assembler to choose the right jump */
2948 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch the 4-byte relative displacement of a previously emitted jump
   at FROM (SIZE is the displacement width) so that it targets TO.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit code to load the 64-bit constant NUM into the %eax/%ebx
   top-of-stack register pair (low half in %eax, high half in %ebx;
   a zero high half is emitted as xor to save four bytes).  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit a 32-bit relative call to the function at address FN from the
   current compilation point.  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  /* Displacement is relative to the end of the 5-byte call insn.  */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
3016 i386_emit_reg (int reg
)
3018 unsigned char buf
[16];
3020 CORE_ADDR buildaddr
;
3022 EMIT_ASM32 (i386_reg_a
,
3024 buildaddr
= current_insn_ptr
;
3026 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3027 memcpy (&buf
[i
], ®
, sizeof (reg
));
3029 append_insns (&buildaddr
, i
, buf
);
3030 current_insn_ptr
= buildaddr
;
3031 EMIT_ASM32 (i386_reg_b
,
3032 "mov %eax,4(%esp)\n\t"
3033 "mov 8(%ebp),%eax\n\t"
3035 i386_emit_call (get_raw_reg_func_addr ());
3036 EMIT_ASM32 (i386_reg_c
,
3038 "lea 0x8(%esp),%esp");
3042 i386_emit_pop (void)
3044 EMIT_ASM32 (i386_pop
,
3050 i386_emit_stack_flush (void)
3052 EMIT_ASM32 (i386_stack_flush
,
3058 i386_emit_zero_ext (int arg
)
3063 EMIT_ASM32 (i386_zero_ext_8
,
3064 "and $0xff,%eax\n\t"
3068 EMIT_ASM32 (i386_zero_ext_16
,
3069 "and $0xffff,%eax\n\t"
3073 EMIT_ASM32 (i386_zero_ext_32
,
3082 i386_emit_swap (void)
3084 EMIT_ASM32 (i386_swap
,
3094 i386_emit_stack_adjust (int n
)
3096 unsigned char buf
[16];
3098 CORE_ADDR buildaddr
= current_insn_ptr
;
3101 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3105 append_insns (&buildaddr
, i
, buf
);
3106 current_insn_ptr
= buildaddr
;
3109 /* FN's prototype is `LONGEST(*fn)(int)'. */
3112 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3114 unsigned char buf
[16];
3116 CORE_ADDR buildaddr
;
3118 EMIT_ASM32 (i386_int_call_1_a
,
3119 /* Reserve a bit of stack space. */
3121 /* Put the one argument on the stack. */
3122 buildaddr
= current_insn_ptr
;
3124 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3127 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3129 append_insns (&buildaddr
, i
, buf
);
3130 current_insn_ptr
= buildaddr
;
3131 i386_emit_call (fn
);
3132 EMIT_ASM32 (i386_int_call_1_c
,
3134 "lea 0x8(%esp),%esp");
3137 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3140 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3142 unsigned char buf
[16];
3144 CORE_ADDR buildaddr
;
3146 EMIT_ASM32 (i386_void_call_2_a
,
3147 /* Preserve %eax only; we don't have to worry about %ebx. */
3149 /* Reserve a bit of stack space for arguments. */
3150 "sub $0x10,%esp\n\t"
3151 /* Copy "top" to the second argument position. (Note that
3152 we can't assume function won't scribble on its
3153 arguments, so don't try to restore from this.) */
3154 "mov %eax,4(%esp)\n\t"
3155 "mov %ebx,8(%esp)");
3156 /* Put the first argument on the stack. */
3157 buildaddr
= current_insn_ptr
;
3159 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3162 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3164 append_insns (&buildaddr
, i
, buf
);
3165 current_insn_ptr
= buildaddr
;
3166 i386_emit_call (fn
);
3167 EMIT_ASM32 (i386_void_call_2_b
,
3168 "lea 0x10(%esp),%esp\n\t"
3169 /* Restore original stack top. */
3175 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3178 /* Check low half first, more likely to be decider */
3179 "cmpl %eax,(%esp)\n\t"
3180 "jne .Leq_fallthru\n\t"
3181 "cmpl %ebx,4(%esp)\n\t"
3182 "jne .Leq_fallthru\n\t"
3183 "lea 0x8(%esp),%esp\n\t"
3186 /* jmp, but don't trust the assembler to choose the right jump */
3187 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3188 ".Leq_fallthru:\n\t"
3189 "lea 0x8(%esp),%esp\n\t"
3200 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3203 /* Check low half first, more likely to be decider */
3204 "cmpl %eax,(%esp)\n\t"
3206 "cmpl %ebx,4(%esp)\n\t"
3207 "je .Lne_fallthru\n\t"
3209 "lea 0x8(%esp),%esp\n\t"
3212 /* jmp, but don't trust the assembler to choose the right jump */
3213 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3214 ".Lne_fallthru:\n\t"
3215 "lea 0x8(%esp),%esp\n\t"
3226 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3229 "cmpl %ebx,4(%esp)\n\t"
3231 "jne .Llt_fallthru\n\t"
3232 "cmpl %eax,(%esp)\n\t"
3233 "jnl .Llt_fallthru\n\t"
3235 "lea 0x8(%esp),%esp\n\t"
3238 /* jmp, but don't trust the assembler to choose the right jump */
3239 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3240 ".Llt_fallthru:\n\t"
3241 "lea 0x8(%esp),%esp\n\t"
3252 i386_emit_le_goto (int *offset_p
, int *size_p
)
3255 "cmpl %ebx,4(%esp)\n\t"
3257 "jne .Lle_fallthru\n\t"
3258 "cmpl %eax,(%esp)\n\t"
3259 "jnle .Lle_fallthru\n\t"
3261 "lea 0x8(%esp),%esp\n\t"
3264 /* jmp, but don't trust the assembler to choose the right jump */
3265 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3266 ".Lle_fallthru:\n\t"
3267 "lea 0x8(%esp),%esp\n\t"
3278 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3281 "cmpl %ebx,4(%esp)\n\t"
3283 "jne .Lgt_fallthru\n\t"
3284 "cmpl %eax,(%esp)\n\t"
3285 "jng .Lgt_fallthru\n\t"
3287 "lea 0x8(%esp),%esp\n\t"
3290 /* jmp, but don't trust the assembler to choose the right jump */
3291 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3292 ".Lgt_fallthru:\n\t"
3293 "lea 0x8(%esp),%esp\n\t"
3304 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3307 "cmpl %ebx,4(%esp)\n\t"
3309 "jne .Lge_fallthru\n\t"
3310 "cmpl %eax,(%esp)\n\t"
3311 "jnge .Lge_fallthru\n\t"
3313 "lea 0x8(%esp),%esp\n\t"
3316 /* jmp, but don't trust the assembler to choose the right jump */
3317 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3318 ".Lge_fallthru:\n\t"
3319 "lea 0x8(%esp),%esp\n\t"
3329 struct emit_ops i386_emit_ops
=
3337 i386_emit_rsh_signed
,
3338 i386_emit_rsh_unsigned
,
3346 i386_emit_less_signed
,
3347 i386_emit_less_unsigned
,
3351 i386_write_goto_address
,
3356 i386_emit_stack_flush
,
3359 i386_emit_stack_adjust
,
3360 i386_emit_int_call_1
,
3361 i386_emit_void_call_2
,
3371 static struct emit_ops
*
3375 if (is_64bit_tdesc ())
3376 return &amd64_emit_ops
;
3379 return &i386_emit_ops
;
3383 x86_supports_range_stepping (void)
3388 /* This is initialized assuming an amd64 target.
3389 x86_arch_setup will correct it for i386 or amd64 targets. */
3391 struct linux_target_ops the_low_target
=
3394 x86_linux_regs_info
,
3395 x86_cannot_fetch_register
,
3396 x86_cannot_store_register
,
3397 NULL
, /* fetch_register */
3407 x86_stopped_by_watchpoint
,
3408 x86_stopped_data_address
,
3409 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3410 native i386 case (no registers smaller than an xfer unit), and are not
3411 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3414 /* need to fix up i386 siginfo if host is amd64 */
3416 x86_linux_new_process
,
3417 x86_linux_new_thread
,
3418 x86_linux_prepare_to_resume
,
3419 x86_linux_process_qsupported
,
3420 x86_supports_tracepoints
,
3421 x86_get_thread_area
,
3422 x86_install_fast_tracepoint_jump_pad
,
3424 x86_get_min_fast_tracepoint_insn_len
,
3425 x86_supports_range_stepping
,
3429 initialize_low_arch (void)
3431 /* Initialize the Linux target descriptions. */
3433 init_registers_amd64_linux ();
3434 init_registers_amd64_avx_linux ();
3435 init_registers_amd64_avx512_linux ();
3436 init_registers_amd64_mpx_linux ();
3438 init_registers_x32_linux ();
3439 init_registers_x32_avx_linux ();
3440 init_registers_x32_avx512_linux ();
3442 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3443 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3444 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3446 init_registers_i386_linux ();
3447 init_registers_i386_mmx_linux ();
3448 init_registers_i386_avx_linux ();
3449 init_registers_i386_avx512_linux ();
3450 init_registers_i386_mpx_linux ();
3452 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3453 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3454 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3456 initialize_regsets_info (&x86_regsets_info
);