1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-mpx-linux.c. */
52 void init_registers_amd64_mpx_linux (void);
53 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
55 /* Defined in auto-generated file x32-linux.c. */
56 void init_registers_x32_linux (void);
57 extern const struct target_desc
*tdesc_x32_linux
;
59 /* Defined in auto-generated file x32-avx-linux.c. */
60 void init_registers_x32_avx_linux (void);
61 extern const struct target_desc
*tdesc_x32_avx_linux
;
65 /* Defined in auto-generated file i386-linux.c. */
66 void init_registers_i386_linux (void);
67 extern const struct target_desc
*tdesc_i386_linux
;
69 /* Defined in auto-generated file i386-mmx-linux.c. */
70 void init_registers_i386_mmx_linux (void);
71 extern const struct target_desc
*tdesc_i386_mmx_linux
;
73 /* Defined in auto-generated file i386-avx-linux.c. */
74 void init_registers_i386_avx_linux (void);
75 extern const struct target_desc
*tdesc_i386_avx_linux
;
77 /* Defined in auto-generated file i386-mpx-linux.c. */
78 void init_registers_i386_mpx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mpx_linux
;
82 static struct target_desc
*tdesc_amd64_linux_no_xml
;
84 static struct target_desc
*tdesc_i386_linux_no_xml
;
87 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
88 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
/* Fallback target description for amd64 when GDB lacks XML support.  */

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
105 #include <sys/procfs.h>
106 #include <sys/ptrace.h>
109 #ifndef PTRACE_GETREGSET
110 #define PTRACE_GETREGSET 0x4204
113 #ifndef PTRACE_SETREGSET
114 #define PTRACE_SETREGSET 0x4205
118 #ifndef PTRACE_GET_THREAD_AREA
119 #define PTRACE_GET_THREAD_AREA 25
122 /* This definition comes from prctl.h, but some kernels may not have it. */
123 #ifndef PTRACE_ARCH_PRCTL
124 #define PTRACE_ARCH_PRCTL 30
127 /* The following definitions come from prctl.h, but may be absent
128 for certain configurations. */
130 #define ARCH_SET_GS 0x1001
131 #define ARCH_SET_FS 0x1002
132 #define ARCH_GET_FS 0x1003
133 #define ARCH_GET_GS 0x1004
136 /* Per-process arch-specific data we want to keep. */
138 struct arch_process_info
140 struct i386_debug_reg_state debug_reg_state
;
/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
153 /* Mapping between the general-purpose registers in `struct user'
154 format and GDB's register array layout.
155 Note that the transfer layout uses 64-bit regs. */
156 static /*const*/ int i386_regmap
[] =
158 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
159 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
160 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
161 DS
* 8, ES
* 8, FS
* 8, GS
* 8
164 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
166 /* So code below doesn't have to care, i386 or amd64. */
167 #define ORIG_EAX ORIG_RAX
169 static const int x86_64_regmap
[] =
171 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
172 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
173 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
174 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
175 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
176 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1, -1, -1, -1, -1, -1, -1, -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
184 -1, -1 /* MPX registers BNDCFGU, BNDSTATUS. */
187 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
189 #else /* ! __x86_64__ */
191 /* Mapping between the general-purpose registers in `struct user'
192 format and GDB's register array layout. */
193 static /*const*/ int i386_regmap
[] =
195 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
196 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
197 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
198 DS
* 4, ES
* 4, FS
* 4, GS
* 4
201 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
207 /* Returns true if the current inferior belongs to a x86-64 process,
211 is_64bit_tdesc (void)
213 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
215 return register_size (regcache
->tdesc
, 0) == 8;
221 /* Called by libthread_db. */
224 ps_get_thread_area (const struct ps_prochandle
*ph
,
225 lwpid_t lwpid
, int idx
, void **base
)
228 int use_64bit
= is_64bit_tdesc ();
235 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
239 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
250 unsigned int desc
[4];
252 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
253 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
256 /* Ensure we properly extend the value to 64-bits for x86_64. */
257 *base
= (void *) (uintptr_t) desc
[1];
262 /* Get the thread area address. This is used to recognize which
263 thread is which when tracing with the in-process agent library. We
264 don't read anything from the address, and treat it as opaque; it's
265 the address itself that we assume is unique per-thread. */
268 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
271 int use_64bit
= is_64bit_tdesc ();
276 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
278 *addr
= (CORE_ADDR
) (uintptr_t) base
;
287 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
288 struct thread_info
*thr
= get_lwp_thread (lwp
);
289 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
290 unsigned int desc
[4];
292 const int reg_thread_area
= 3; /* bits to scale down register value. */
295 collect_register_by_name (regcache
, "gs", &gs
);
297 idx
= gs
>> reg_thread_area
;
299 if (ptrace (PTRACE_GET_THREAD_AREA
,
301 (void *) (long) idx
, (unsigned long) &desc
) < 0)
312 x86_cannot_store_register (int regno
)
315 if (is_64bit_tdesc ())
319 return regno
>= I386_NUM_REGS
;
323 x86_cannot_fetch_register (int regno
)
326 if (is_64bit_tdesc ())
330 return regno
>= I386_NUM_REGS
;
334 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
339 if (register_size (regcache
->tdesc
, 0) == 8)
341 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
342 if (x86_64_regmap
[i
] != -1)
343 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
348 for (i
= 0; i
< I386_NUM_REGS
; i
++)
349 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
351 collect_register_by_name (regcache
, "orig_eax",
352 ((char *) buf
) + ORIG_EAX
* 4);
356 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
361 if (register_size (regcache
->tdesc
, 0) == 8)
363 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
364 if (x86_64_regmap
[i
] != -1)
365 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
370 for (i
= 0; i
< I386_NUM_REGS
; i
++)
371 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
373 supply_register_by_name (regcache
, "orig_eax",
374 ((char *) buf
) + ORIG_EAX
* 4);
/* Convert the regcache's FP state into ptrace fxsave (amd64) or
   fsave (i386) layout in BUF.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Load the regcache's FP state from a ptrace fxsave (amd64) or
   fsave (i386) buffer BUF.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Convert the regcache's FP/SSE state into fxsave layout in BUF.
   NOTE(review): upstream guards this pair with HAVE_PTRACE_GETFPXREGS;
   those directives were lost in extraction.  */
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Load the regcache's FP/SSE state from an fxsave buffer BUF.  */
static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Convert the regcache's extended state into xsave layout in BUF.  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Load the regcache's extended state from an xsave buffer BUF.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
425 /* ??? The non-biarch i386 case stores all the i387 regs twice.
426 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
427 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
428 doesn't work. IWBN to avoid the duplication in the case where it
429 does work. Maybe the arch_setup routine could check whether it works
430 and update the supported regsets accordingly. */
432 static struct regset_info x86_regsets
[] =
434 #ifdef HAVE_PTRACE_GETREGS
435 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
437 x86_fill_gregset
, x86_store_gregset
},
438 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
439 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
441 # ifdef HAVE_PTRACE_GETFPXREGS
442 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
444 x86_fill_fpxregset
, x86_store_fpxregset
},
447 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
449 x86_fill_fpregset
, x86_store_fpregset
},
450 #endif /* HAVE_PTRACE_GETREGS */
451 { 0, 0, 0, -1, -1, NULL
, NULL
}
455 x86_get_pc (struct regcache
*regcache
)
457 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
462 collect_register_by_name (regcache
, "rip", &pc
);
463 return (CORE_ADDR
) pc
;
468 collect_register_by_name (regcache
, "eip", &pc
);
469 return (CORE_ADDR
) pc
;
474 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
476 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
480 unsigned long newpc
= pc
;
481 supply_register_by_name (regcache
, "rip", &newpc
);
485 unsigned int newpc
= pc
;
486 supply_register_by_name (regcache
, "eip", &newpc
);
/* The single-byte int3 instruction used as the software breakpoint.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
491 #define x86_breakpoint_len 1
494 x86_breakpoint_at (CORE_ADDR pc
)
498 (*the_target
->read_memory
) (pc
, &c
, 1);
505 /* Support for debug registers. */
508 x86_linux_dr_get (ptid_t ptid
, int regnum
)
513 tid
= ptid_get_lwp (ptid
);
516 value
= ptrace (PTRACE_PEEKUSER
, tid
,
517 offsetof (struct user
, u_debugreg
[regnum
]), 0);
519 error ("Couldn't read debug register");
525 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
529 tid
= ptid_get_lwp (ptid
);
532 ptrace (PTRACE_POKEUSER
, tid
,
533 offsetof (struct user
, u_debugreg
[regnum
]), value
);
535 error ("Couldn't write debug register");
539 update_debug_registers_callback (struct inferior_list_entry
*entry
,
542 struct thread_info
*thr
= (struct thread_info
*) entry
;
543 struct lwp_info
*lwp
= get_thread_lwp (thr
);
544 int pid
= *(int *) pid_p
;
546 /* Only update the threads of this process. */
547 if (pid_of (thr
) == pid
)
549 /* The actual update is done later just before resuming the lwp,
550 we just mark that the registers need updating. */
551 lwp
->arch_private
->debug_registers_changed
= 1;
553 /* If the lwp isn't stopped, force it to momentarily pause, so
554 we can update its debug registers. */
556 linux_stop_lwp (lwp
);
562 /* Update the inferior's debug register REGNUM from STATE. */
565 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
567 /* Only update the threads of this process. */
568 int pid
= pid_of (current_inferior
);
570 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
571 fatal ("Invalid debug register %d", regnum
);
573 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
576 /* Return the inferior's debug register REGNUM. */
579 i386_dr_low_get_addr (int regnum
)
581 ptid_t ptid
= ptid_of (current_inferior
);
583 /* DR6 and DR7 are retrieved with some other way. */
584 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
586 return x86_linux_dr_get (ptid
, regnum
);
589 /* Update the inferior's DR7 debug control register from STATE. */
592 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
594 /* Only update the threads of this process. */
595 int pid
= pid_of (current_inferior
);
597 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
600 /* Return the inferior's DR7 debug control register. */
603 i386_dr_low_get_control (void)
605 ptid_t ptid
= ptid_of (current_inferior
);
607 return x86_linux_dr_get (ptid
, DR_CONTROL
);
610 /* Get the value of the DR6 debug status register from the inferior
611 and record it in STATE. */
614 i386_dr_low_get_status (void)
616 ptid_t ptid
= ptid_of (current_inferior
);
618 return x86_linux_dr_get (ptid
, DR_STATUS
);
621 /* Breakpoint/Watchpoint support. */
624 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
626 struct process_info
*proc
= current_process ();
629 case '0': /* software-breakpoint */
633 ret
= prepare_to_access_memory ();
636 ret
= set_gdb_breakpoint_at (addr
);
637 done_accessing_memory ();
640 case '1': /* hardware-breakpoint */
641 case '2': /* write watchpoint */
642 case '3': /* read watchpoint */
643 case '4': /* access watchpoint */
644 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
654 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
656 struct process_info
*proc
= current_process ();
659 case '0': /* software-breakpoint */
663 ret
= prepare_to_access_memory ();
666 ret
= delete_gdb_breakpoint_at (addr
);
667 done_accessing_memory ();
670 case '1': /* hardware-breakpoint */
671 case '2': /* write watchpoint */
672 case '3': /* read watchpoint */
673 case '4': /* access watchpoint */
674 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
683 x86_stopped_by_watchpoint (void)
685 struct process_info
*proc
= current_process ();
686 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
690 x86_stopped_data_address (void)
692 struct process_info
*proc
= current_process ();
694 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
700 /* Called when a new process is created. */
702 static struct arch_process_info
*
703 x86_linux_new_process (void)
705 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
707 i386_low_init_dregs (&info
->debug_reg_state
);
712 /* Called when a new thread is detected. */
714 static struct arch_lwp_info
*
715 x86_linux_new_thread (void)
717 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
719 info
->debug_registers_changed
= 1;
724 /* Called when resuming a thread.
725 If the debug regs have changed, update the thread's copies. */
728 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
730 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
731 int clear_status
= 0;
733 if (lwp
->arch_private
->debug_registers_changed
)
736 int pid
= ptid_get_pid (ptid
);
737 struct process_info
*proc
= find_process_pid (pid
);
738 struct i386_debug_reg_state
*state
739 = &proc
->private->arch_private
->debug_reg_state
;
741 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
742 if (state
->dr_ref_count
[i
] > 0)
744 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
746 /* If we're setting a watchpoint, any change the inferior
747 had done itself to the debug registers needs to be
748 discarded, otherwise, i386_low_stopped_data_address can
753 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
755 lwp
->arch_private
->debug_registers_changed
= 0;
758 if (clear_status
|| lwp
->stopped_by_watchpoint
)
759 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
762 /* When GDBSERVER is built as a 64-bit application on linux, the
763 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
764 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
765 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
766 conversion in-place ourselves. */
768 /* These types below (compat_*) define a siginfo type that is layout
769 compatible with the siginfo type exported by the 32-bit userspace
774 typedef int compat_int_t
;
775 typedef unsigned int compat_uptr_t
;
777 typedef int compat_time_t
;
778 typedef int compat_timer_t
;
779 typedef int compat_clock_t
;
781 struct compat_timeval
783 compat_time_t tv_sec
;
787 typedef union compat_sigval
789 compat_int_t sival_int
;
790 compat_uptr_t sival_ptr
;
793 typedef struct compat_siginfo
801 int _pad
[((128 / sizeof (int)) - 3)];
810 /* POSIX.1b timers */
815 compat_sigval_t _sigval
;
818 /* POSIX.1b signals */
823 compat_sigval_t _sigval
;
832 compat_clock_t _utime
;
833 compat_clock_t _stime
;
836 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
851 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
852 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
854 typedef struct compat_x32_siginfo
862 int _pad
[((128 / sizeof (int)) - 3)];
871 /* POSIX.1b timers */
876 compat_sigval_t _sigval
;
879 /* POSIX.1b signals */
884 compat_sigval_t _sigval
;
893 compat_x32_clock_t _utime
;
894 compat_x32_clock_t _stime
;
897 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
910 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
912 #define cpt_si_pid _sifields._kill._pid
913 #define cpt_si_uid _sifields._kill._uid
914 #define cpt_si_timerid _sifields._timer._tid
915 #define cpt_si_overrun _sifields._timer._overrun
916 #define cpt_si_status _sifields._sigchld._status
917 #define cpt_si_utime _sifields._sigchld._utime
918 #define cpt_si_stime _sifields._sigchld._stime
919 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
920 #define cpt_si_addr _sifields._sigfault._addr
921 #define cpt_si_band _sifields._sigpoll._band
922 #define cpt_si_fd _sifields._sigpoll._fd
924 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
925 In their place is si_timer1,si_timer2. */
927 #define si_timerid si_timer1
930 #define si_overrun si_timer2
934 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
936 memset (to
, 0, sizeof (*to
));
938 to
->si_signo
= from
->si_signo
;
939 to
->si_errno
= from
->si_errno
;
940 to
->si_code
= from
->si_code
;
942 if (to
->si_code
== SI_TIMER
)
944 to
->cpt_si_timerid
= from
->si_timerid
;
945 to
->cpt_si_overrun
= from
->si_overrun
;
946 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
948 else if (to
->si_code
== SI_USER
)
950 to
->cpt_si_pid
= from
->si_pid
;
951 to
->cpt_si_uid
= from
->si_uid
;
953 else if (to
->si_code
< 0)
955 to
->cpt_si_pid
= from
->si_pid
;
956 to
->cpt_si_uid
= from
->si_uid
;
957 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
961 switch (to
->si_signo
)
964 to
->cpt_si_pid
= from
->si_pid
;
965 to
->cpt_si_uid
= from
->si_uid
;
966 to
->cpt_si_status
= from
->si_status
;
967 to
->cpt_si_utime
= from
->si_utime
;
968 to
->cpt_si_stime
= from
->si_stime
;
974 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
977 to
->cpt_si_band
= from
->si_band
;
978 to
->cpt_si_fd
= from
->si_fd
;
981 to
->cpt_si_pid
= from
->si_pid
;
982 to
->cpt_si_uid
= from
->si_uid
;
983 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
990 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
992 memset (to
, 0, sizeof (*to
));
994 to
->si_signo
= from
->si_signo
;
995 to
->si_errno
= from
->si_errno
;
996 to
->si_code
= from
->si_code
;
998 if (to
->si_code
== SI_TIMER
)
1000 to
->si_timerid
= from
->cpt_si_timerid
;
1001 to
->si_overrun
= from
->cpt_si_overrun
;
1002 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1004 else if (to
->si_code
== SI_USER
)
1006 to
->si_pid
= from
->cpt_si_pid
;
1007 to
->si_uid
= from
->cpt_si_uid
;
1009 else if (to
->si_code
< 0)
1011 to
->si_pid
= from
->cpt_si_pid
;
1012 to
->si_uid
= from
->cpt_si_uid
;
1013 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1017 switch (to
->si_signo
)
1020 to
->si_pid
= from
->cpt_si_pid
;
1021 to
->si_uid
= from
->cpt_si_uid
;
1022 to
->si_status
= from
->cpt_si_status
;
1023 to
->si_utime
= from
->cpt_si_utime
;
1024 to
->si_stime
= from
->cpt_si_stime
;
1030 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1033 to
->si_band
= from
->cpt_si_band
;
1034 to
->si_fd
= from
->cpt_si_fd
;
1037 to
->si_pid
= from
->cpt_si_pid
;
1038 to
->si_uid
= from
->cpt_si_uid
;
1039 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1046 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1049 memset (to
, 0, sizeof (*to
));
1051 to
->si_signo
= from
->si_signo
;
1052 to
->si_errno
= from
->si_errno
;
1053 to
->si_code
= from
->si_code
;
1055 if (to
->si_code
== SI_TIMER
)
1057 to
->cpt_si_timerid
= from
->si_timerid
;
1058 to
->cpt_si_overrun
= from
->si_overrun
;
1059 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1061 else if (to
->si_code
== SI_USER
)
1063 to
->cpt_si_pid
= from
->si_pid
;
1064 to
->cpt_si_uid
= from
->si_uid
;
1066 else if (to
->si_code
< 0)
1068 to
->cpt_si_pid
= from
->si_pid
;
1069 to
->cpt_si_uid
= from
->si_uid
;
1070 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1074 switch (to
->si_signo
)
1077 to
->cpt_si_pid
= from
->si_pid
;
1078 to
->cpt_si_uid
= from
->si_uid
;
1079 to
->cpt_si_status
= from
->si_status
;
1080 to
->cpt_si_utime
= from
->si_utime
;
1081 to
->cpt_si_stime
= from
->si_stime
;
1087 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1090 to
->cpt_si_band
= from
->si_band
;
1091 to
->cpt_si_fd
= from
->si_fd
;
1094 to
->cpt_si_pid
= from
->si_pid
;
1095 to
->cpt_si_uid
= from
->si_uid
;
1096 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1103 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1104 compat_x32_siginfo_t
*from
)
1106 memset (to
, 0, sizeof (*to
));
1108 to
->si_signo
= from
->si_signo
;
1109 to
->si_errno
= from
->si_errno
;
1110 to
->si_code
= from
->si_code
;
1112 if (to
->si_code
== SI_TIMER
)
1114 to
->si_timerid
= from
->cpt_si_timerid
;
1115 to
->si_overrun
= from
->cpt_si_overrun
;
1116 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1118 else if (to
->si_code
== SI_USER
)
1120 to
->si_pid
= from
->cpt_si_pid
;
1121 to
->si_uid
= from
->cpt_si_uid
;
1123 else if (to
->si_code
< 0)
1125 to
->si_pid
= from
->cpt_si_pid
;
1126 to
->si_uid
= from
->cpt_si_uid
;
1127 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1131 switch (to
->si_signo
)
1134 to
->si_pid
= from
->cpt_si_pid
;
1135 to
->si_uid
= from
->cpt_si_uid
;
1136 to
->si_status
= from
->cpt_si_status
;
1137 to
->si_utime
= from
->cpt_si_utime
;
1138 to
->si_stime
= from
->cpt_si_stime
;
1144 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1147 to
->si_band
= from
->cpt_si_band
;
1148 to
->si_fd
= from
->cpt_si_fd
;
1151 to
->si_pid
= from
->cpt_si_pid
;
1152 to
->si_uid
= from
->cpt_si_uid
;
1153 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1159 #endif /* __x86_64__ */
1161 /* Convert a native/host siginfo object, into/from the siginfo in the
1162 layout of the inferiors' architecture. Returns true if any
1163 conversion was done; false otherwise. If DIRECTION is 1, then copy
1164 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1168 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1171 unsigned int machine
;
1172 int tid
= lwpid_of (current_inferior
);
1173 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1175 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1176 if (!is_64bit_tdesc ())
1178 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1179 fatal ("unexpected difference in siginfo");
1182 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1184 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1188 /* No fixup for native x32 GDB. */
1189 else if (!is_elf64
&& sizeof (void *) == 8)
1191 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1192 fatal ("unexpected difference in siginfo");
1195 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1198 siginfo_from_compat_x32_siginfo (native
,
1199 (struct compat_x32_siginfo
*) inf
);
1210 /* Format of XSAVE extended state is:
1213 fxsave_bytes[0..463]
1214 sw_usable_bytes[464..511]
1215 xstate_hdr_bytes[512..575]
1220 Same memory layout will be used for the coredump NT_X86_XSTATE
1221 representing the XSAVE extended state registers.
1223 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1224 extended state mask, which is the same as the extended control register
1225 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1226 together with the mask saved in the xstate_hdr_bytes to determine what
1227 states the processor/OS supports and what state, used or initialized,
1228 the process/thread is in. */
1229 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "probe at runtime"; 0/1 record the probe's outcome.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;
1245 /* Get Linux/x86 target description from running target. */
1247 static const struct target_desc
*
1248 x86_linux_read_description (void)
1250 unsigned int machine
;
1254 static uint64_t xcr0
;
1255 struct regset_info
*regset
;
1257 tid
= lwpid_of (current_inferior
);
1259 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1261 if (sizeof (void *) == 4)
1264 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1266 else if (machine
== EM_X86_64
)
1267 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1271 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1272 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1274 elf_fpxregset_t fpxregs
;
1276 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1278 have_ptrace_getfpxregs
= 0;
1279 have_ptrace_getregset
= 0;
1280 return tdesc_i386_mmx_linux
;
1283 have_ptrace_getfpxregs
= 1;
1289 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1291 /* Don't use XML. */
1293 if (machine
== EM_X86_64
)
1294 return tdesc_amd64_linux_no_xml
;
1297 return tdesc_i386_linux_no_xml
;
1300 if (have_ptrace_getregset
== -1)
1302 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1305 iov
.iov_base
= xstateregs
;
1306 iov
.iov_len
= sizeof (xstateregs
);
1308 /* Check if PTRACE_GETREGSET works. */
1309 if (ptrace (PTRACE_GETREGSET
, tid
,
1310 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1311 have_ptrace_getregset
= 0;
1314 have_ptrace_getregset
= 1;
1316 /* Get XCR0 from XSAVE extended state. */
1317 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1318 / sizeof (uint64_t))];
1320 /* Use PTRACE_GETREGSET if it is available. */
1321 for (regset
= x86_regsets
;
1322 regset
->fill_function
!= NULL
; regset
++)
1323 if (regset
->get_request
== PTRACE_GETREGSET
)
1324 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1325 else if (regset
->type
!= GENERAL_REGS
)
1330 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1331 xcr0_features
= (have_ptrace_getregset
1332 && (xcr0
& I386_XSTATE_ALL_MASK
));
1337 if (machine
== EM_X86_64
)
1344 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1346 case I386_XSTATE_MPX_MASK
:
1347 return tdesc_amd64_mpx_linux
;
1349 case I386_XSTATE_AVX_MASK
:
1350 return tdesc_amd64_avx_linux
;
1353 return tdesc_amd64_linux
;
1357 return tdesc_amd64_linux
;
1363 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1365 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1366 case I386_XSTATE_AVX_MASK
:
1367 return tdesc_x32_avx_linux
;
1370 return tdesc_x32_linux
;
1374 return tdesc_x32_linux
;
1382 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1384 case (I386_XSTATE_MPX_MASK
):
1385 return tdesc_i386_mpx_linux
;
1387 case (I386_XSTATE_AVX_MASK
):
1388 return tdesc_i386_avx_linux
;
1391 return tdesc_i386_linux
;
1395 return tdesc_i386_linux
;
1398 gdb_assert_not_reached ("failed to return tdesc");
1401 /* Callback for find_inferior. Stops iteration when a thread with a
1402 given PID is found. */
1405 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1407 int pid
= *(int *) data
;
1409 return (ptid_get_pid (entry
->id
) == pid
);
1412 /* Callback for for_each_inferior. Calls the arch_setup routine for
1416 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1418 int pid
= ptid_get_pid (entry
->id
);
1420 /* Look up any thread of this processes. */
1422 = (struct thread_info
*) find_inferior (&all_threads
,
1423 same_process_callback
, &pid
);
1425 the_low_target
.arch_setup ();
1428 /* Update all the target description of all processes; a new GDB
1429 connected, and it may or not support xml target descriptions. */
1432 x86_linux_update_xmltarget (void)
1434 struct thread_info
*save_inferior
= current_inferior
;
1436 /* Before changing the register cache's internal layout, flush the
1437 contents of the current valid caches back to the threads, and
1438 release the current regcache objects. */
1439 regcache_release ();
1441 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1443 current_inferior
= save_inferior
;
1446 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1447 PTRACE_GETREGSET. */
1450 x86_linux_process_qsupported (const char *query
)
1452 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1453 with "i386" in qSupported query, it supports x86 XML target
1456 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1458 char *copy
= xstrdup (query
+ 13);
1461 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1463 if (strcmp (p
, "i386") == 0)
1473 x86_linux_update_xmltarget ();
1476 /* Common for x86/x86-64. */
1478 static struct regsets_info x86_regsets_info
=
1480 x86_regsets
, /* regsets */
1481 0, /* num_regsets */
1482 NULL
, /* disabled_regsets */
1486 static struct regs_info amd64_linux_regs_info
=
1488 NULL
, /* regset_bitmap */
1489 NULL
, /* usrregs_info */
1493 static struct usrregs_info i386_linux_usrregs_info
=
1499 static struct regs_info i386_linux_regs_info
=
1501 NULL
, /* regset_bitmap */
1502 &i386_linux_usrregs_info
,
1506 const struct regs_info
*
1507 x86_linux_regs_info (void)
1510 if (is_64bit_tdesc ())
1511 return &amd64_linux_regs_info
;
1514 return &i386_linux_regs_info
;
1517 /* Initialize the target description for the architecture of the
1521 x86_arch_setup (void)
1523 current_process ()->tdesc
= x86_linux_read_description ();
/* Tracepoints are always supported on x86/x86-64.  */
static int
x86_supports_tracepoints (void)
{
  return 1;
}
1533 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1535 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hex byte values (e.g.
   "48 83 ec 18"), into BUF.  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: end of the byte list.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1561 /* Build a jump pad that saves registers and calls a collection
1562 function. Writes a jump instruction to the jump pad to
1563 JJUMPAD_INSN. The caller is responsible to write it in at the
1564 tracepoint address. */
1567 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1568 CORE_ADDR collector
,
1571 CORE_ADDR
*jump_entry
,
1572 CORE_ADDR
*trampoline
,
1573 ULONGEST
*trampoline_size
,
1574 unsigned char *jjump_pad_insn
,
1575 ULONGEST
*jjump_pad_insn_size
,
1576 CORE_ADDR
*adjusted_insn_addr
,
1577 CORE_ADDR
*adjusted_insn_addr_end
,
1580 unsigned char buf
[40];
1584 CORE_ADDR buildaddr
= *jump_entry
;
1586 /* Build the jump pad. */
1588 /* First, do tracepoint data collection. Save registers. */
1590 /* Need to ensure stack pointer saved first. */
1591 buf
[i
++] = 0x54; /* push %rsp */
1592 buf
[i
++] = 0x55; /* push %rbp */
1593 buf
[i
++] = 0x57; /* push %rdi */
1594 buf
[i
++] = 0x56; /* push %rsi */
1595 buf
[i
++] = 0x52; /* push %rdx */
1596 buf
[i
++] = 0x51; /* push %rcx */
1597 buf
[i
++] = 0x53; /* push %rbx */
1598 buf
[i
++] = 0x50; /* push %rax */
1599 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1600 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1601 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1602 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1603 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1604 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1605 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1606 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1607 buf
[i
++] = 0x9c; /* pushfq */
1608 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1610 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1611 i
+= sizeof (unsigned long);
1612 buf
[i
++] = 0x57; /* push %rdi */
1613 append_insns (&buildaddr
, i
, buf
);
1615 /* Stack space for the collecting_t object. */
1617 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1618 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1619 memcpy (buf
+ i
, &tpoint
, 8);
1621 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1622 i
+= push_opcode (&buf
[i
],
1623 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1624 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1625 append_insns (&buildaddr
, i
, buf
);
1629 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1630 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1632 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1633 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1634 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1635 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1636 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1637 append_insns (&buildaddr
, i
, buf
);
1639 /* Set up the gdb_collect call. */
1640 /* At this point, (stack pointer + 0x18) is the base of our saved
1644 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1645 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1647 /* tpoint address may be 64-bit wide. */
1648 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1649 memcpy (buf
+ i
, &tpoint
, 8);
1651 append_insns (&buildaddr
, i
, buf
);
1653 /* The collector function being in the shared library, may be
1654 >31-bits away off the jump pad. */
1656 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1657 memcpy (buf
+ i
, &collector
, 8);
1659 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1660 append_insns (&buildaddr
, i
, buf
);
1662 /* Clear the spin-lock. */
1664 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1665 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1666 memcpy (buf
+ i
, &lockaddr
, 8);
1668 append_insns (&buildaddr
, i
, buf
);
1670 /* Remove stack that had been used for the collect_t object. */
1672 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1673 append_insns (&buildaddr
, i
, buf
);
1675 /* Restore register state. */
1677 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1681 buf
[i
++] = 0x9d; /* popfq */
1682 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1683 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1684 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1685 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1686 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1687 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1688 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1689 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1690 buf
[i
++] = 0x58; /* pop %rax */
1691 buf
[i
++] = 0x5b; /* pop %rbx */
1692 buf
[i
++] = 0x59; /* pop %rcx */
1693 buf
[i
++] = 0x5a; /* pop %rdx */
1694 buf
[i
++] = 0x5e; /* pop %rsi */
1695 buf
[i
++] = 0x5f; /* pop %rdi */
1696 buf
[i
++] = 0x5d; /* pop %rbp */
1697 buf
[i
++] = 0x5c; /* pop %rsp */
1698 append_insns (&buildaddr
, i
, buf
);
1700 /* Now, adjust the original instruction to execute in the jump
1702 *adjusted_insn_addr
= buildaddr
;
1703 relocate_instruction (&buildaddr
, tpaddr
);
1704 *adjusted_insn_addr_end
= buildaddr
;
1706 /* Finally, write a jump back to the program. */
1708 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1709 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1712 "E.Jump back from jump pad too far from tracepoint "
1713 "(offset 0x%" PRIx64
" > int32).", loffset
);
1717 offset
= (int) loffset
;
1718 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1719 memcpy (buf
+ 1, &offset
, 4);
1720 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1722 /* The jump pad is now built. Wire in a jump to our jump pad. This
1723 is always done last (by our caller actually), so that we can
1724 install fast tracepoints with threads running. This relies on
1725 the agent's atomic write support. */
1726 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1727 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1730 "E.Jump pad too far from tracepoint "
1731 "(offset 0x%" PRIx64
" > int32).", loffset
);
1735 offset
= (int) loffset
;
1737 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1738 memcpy (buf
+ 1, &offset
, 4);
1739 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1740 *jjump_pad_insn_size
= sizeof (jump_insn
);
1742 /* Return the end address of our pad. */
1743 *jump_entry
= buildaddr
;
1748 #endif /* __x86_64__ */
1750 /* Build a jump pad that saves registers and calls a collection
1751 function. Writes a jump instruction to the jump pad to
1752 JJUMPAD_INSN. The caller is responsible to write it in at the
1753 tracepoint address. */
1756 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1757 CORE_ADDR collector
,
1760 CORE_ADDR
*jump_entry
,
1761 CORE_ADDR
*trampoline
,
1762 ULONGEST
*trampoline_size
,
1763 unsigned char *jjump_pad_insn
,
1764 ULONGEST
*jjump_pad_insn_size
,
1765 CORE_ADDR
*adjusted_insn_addr
,
1766 CORE_ADDR
*adjusted_insn_addr_end
,
1769 unsigned char buf
[0x100];
1771 CORE_ADDR buildaddr
= *jump_entry
;
1773 /* Build the jump pad. */
1775 /* First, do tracepoint data collection. Save registers. */
1777 buf
[i
++] = 0x60; /* pushad */
1778 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1779 *((int *)(buf
+ i
)) = (int) tpaddr
;
1781 buf
[i
++] = 0x9c; /* pushf */
1782 buf
[i
++] = 0x1e; /* push %ds */
1783 buf
[i
++] = 0x06; /* push %es */
1784 buf
[i
++] = 0x0f; /* push %fs */
1786 buf
[i
++] = 0x0f; /* push %gs */
1788 buf
[i
++] = 0x16; /* push %ss */
1789 buf
[i
++] = 0x0e; /* push %cs */
1790 append_insns (&buildaddr
, i
, buf
);
1792 /* Stack space for the collecting_t object. */
1794 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1796 /* Build the object. */
1797 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1798 memcpy (buf
+ i
, &tpoint
, 4);
1800 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1802 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1803 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1804 append_insns (&buildaddr
, i
, buf
);
1806 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1807 If we cared for it, this could be using xchg alternatively. */
1810 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1811 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1813 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1815 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1816 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1817 append_insns (&buildaddr
, i
, buf
);
1820 /* Set up arguments to the gdb_collect call. */
1822 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1823 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1824 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1825 append_insns (&buildaddr
, i
, buf
);
1828 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1829 append_insns (&buildaddr
, i
, buf
);
1832 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1833 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1835 append_insns (&buildaddr
, i
, buf
);
1837 buf
[0] = 0xe8; /* call <reladdr> */
1838 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1839 memcpy (buf
+ 1, &offset
, 4);
1840 append_insns (&buildaddr
, 5, buf
);
1841 /* Clean up after the call. */
1842 buf
[0] = 0x83; /* add $0x8,%esp */
1845 append_insns (&buildaddr
, 3, buf
);
1848 /* Clear the spin-lock. This would need the LOCK prefix on older
1851 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1852 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1853 memcpy (buf
+ i
, &lockaddr
, 4);
1855 append_insns (&buildaddr
, i
, buf
);
1858 /* Remove stack that had been used for the collect_t object. */
1860 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1861 append_insns (&buildaddr
, i
, buf
);
1864 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1867 buf
[i
++] = 0x17; /* pop %ss */
1868 buf
[i
++] = 0x0f; /* pop %gs */
1870 buf
[i
++] = 0x0f; /* pop %fs */
1872 buf
[i
++] = 0x07; /* pop %es */
1873 buf
[i
++] = 0x1f; /* pop %ds */
1874 buf
[i
++] = 0x9d; /* popf */
1875 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1878 buf
[i
++] = 0x61; /* popad */
1879 append_insns (&buildaddr
, i
, buf
);
1881 /* Now, adjust the original instruction to execute in the jump
1883 *adjusted_insn_addr
= buildaddr
;
1884 relocate_instruction (&buildaddr
, tpaddr
);
1885 *adjusted_insn_addr_end
= buildaddr
;
1887 /* Write the jump back to the program. */
1888 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1889 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1890 memcpy (buf
+ 1, &offset
, 4);
1891 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1893 /* The jump pad is now built. Wire in a jump to our jump pad. This
1894 is always done last (by our caller actually), so that we can
1895 install fast tracepoints with threads running. This relies on
1896 the agent's atomic write support. */
1899 /* Create a trampoline. */
1900 *trampoline_size
= sizeof (jump_insn
);
1901 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1903 /* No trampoline space available. */
1905 "E.Cannot allocate trampoline space needed for fast "
1906 "tracepoints on 4-byte instructions.");
1910 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1911 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1912 memcpy (buf
+ 1, &offset
, 4);
1913 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1915 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1916 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1917 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1918 memcpy (buf
+ 2, &offset
, 2);
1919 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1920 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1924 /* Else use a 32-bit relative jump instruction. */
1925 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1926 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1927 memcpy (buf
+ 1, &offset
, 4);
1928 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1929 *jjump_pad_insn_size
= sizeof (jump_insn
);
1932 /* Return the end address of our pad. */
1933 *jump_entry
= buildaddr
;
1939 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1940 CORE_ADDR collector
,
1943 CORE_ADDR
*jump_entry
,
1944 CORE_ADDR
*trampoline
,
1945 ULONGEST
*trampoline_size
,
1946 unsigned char *jjump_pad_insn
,
1947 ULONGEST
*jjump_pad_insn_size
,
1948 CORE_ADDR
*adjusted_insn_addr
,
1949 CORE_ADDR
*adjusted_insn_addr_end
,
1953 if (is_64bit_tdesc ())
1954 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1955 collector
, lockaddr
,
1956 orig_size
, jump_entry
,
1957 trampoline
, trampoline_size
,
1959 jjump_pad_insn_size
,
1961 adjusted_insn_addr_end
,
1965 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1966 collector
, lockaddr
,
1967 orig_size
, jump_entry
,
1968 trampoline
, trampoline_size
,
1970 jjump_pad_insn_size
,
1972 adjusted_insn_addr_end
,
1976 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1980 x86_get_min_fast_tracepoint_insn_len (void)
1982 static int warned_about_fast_tracepoints
= 0;
1985 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1986 used for fast tracepoints. */
1987 if (is_64bit_tdesc ())
1991 if (agent_loaded_p ())
1993 char errbuf
[IPA_BUFSIZ
];
1997 /* On x86, if trampolines are available, then 4-byte jump instructions
1998 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1999 with a 4-byte offset are used instead. */
2000 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2004 /* GDB has no channel to explain to user why a shorter fast
2005 tracepoint is not possible, but at least make GDBserver
2006 mention that something has gone awry. */
2007 if (!warned_about_fast_tracepoints
)
2009 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2010 warned_about_fast_tracepoints
= 1;
2017 /* Indicate that the minimum length is currently unknown since the IPA
2018 has not loaded yet. */
2024 add_insns (unsigned char *start
, int len
)
2026 CORE_ADDR buildaddr
= current_insn_ptr
;
2029 debug_printf ("Adding %d bytes of insn at %s\n",
2030 len
, paddress (buildaddr
));
2032 append_insns (&buildaddr
, len
, start
);
2033 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant on a 64-bit host: temporarily switch the assembler
   into .code32 so the copied bytes are valid i386 code.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2077 amd64_emit_prologue (void)
2079 EMIT_ASM (amd64_prologue
,
2081 "movq %rsp,%rbp\n\t"
2082 "sub $0x20,%rsp\n\t"
2083 "movq %rdi,-8(%rbp)\n\t"
2084 "movq %rsi,-16(%rbp)");
2089 amd64_emit_epilogue (void)
2091 EMIT_ASM (amd64_epilogue
,
2092 "movq -16(%rbp),%rdi\n\t"
2093 "movq %rax,(%rdi)\n\t"
2100 amd64_emit_add (void)
2102 EMIT_ASM (amd64_add
,
2103 "add (%rsp),%rax\n\t"
2104 "lea 0x8(%rsp),%rsp");
2108 amd64_emit_sub (void)
2110 EMIT_ASM (amd64_sub
,
2111 "sub %rax,(%rsp)\n\t"
2116 amd64_emit_mul (void)
2122 amd64_emit_lsh (void)
2128 amd64_emit_rsh_signed (void)
2134 amd64_emit_rsh_unsigned (void)
2140 amd64_emit_ext (int arg
)
2145 EMIT_ASM (amd64_ext_8
,
2151 EMIT_ASM (amd64_ext_16
,
2156 EMIT_ASM (amd64_ext_32
,
2165 amd64_emit_log_not (void)
2167 EMIT_ASM (amd64_log_not
,
2168 "test %rax,%rax\n\t"
2174 amd64_emit_bit_and (void)
2176 EMIT_ASM (amd64_and
,
2177 "and (%rsp),%rax\n\t"
2178 "lea 0x8(%rsp),%rsp");
2182 amd64_emit_bit_or (void)
2185 "or (%rsp),%rax\n\t"
2186 "lea 0x8(%rsp),%rsp");
2190 amd64_emit_bit_xor (void)
2192 EMIT_ASM (amd64_xor
,
2193 "xor (%rsp),%rax\n\t"
2194 "lea 0x8(%rsp),%rsp");
2198 amd64_emit_bit_not (void)
2200 EMIT_ASM (amd64_bit_not
,
2201 "xorq $0xffffffffffffffff,%rax");
2205 amd64_emit_equal (void)
2207 EMIT_ASM (amd64_equal
,
2208 "cmp %rax,(%rsp)\n\t"
2209 "je .Lamd64_equal_true\n\t"
2211 "jmp .Lamd64_equal_end\n\t"
2212 ".Lamd64_equal_true:\n\t"
2214 ".Lamd64_equal_end:\n\t"
2215 "lea 0x8(%rsp),%rsp");
2219 amd64_emit_less_signed (void)
2221 EMIT_ASM (amd64_less_signed
,
2222 "cmp %rax,(%rsp)\n\t"
2223 "jl .Lamd64_less_signed_true\n\t"
2225 "jmp .Lamd64_less_signed_end\n\t"
2226 ".Lamd64_less_signed_true:\n\t"
2228 ".Lamd64_less_signed_end:\n\t"
2229 "lea 0x8(%rsp),%rsp");
2233 amd64_emit_less_unsigned (void)
2235 EMIT_ASM (amd64_less_unsigned
,
2236 "cmp %rax,(%rsp)\n\t"
2237 "jb .Lamd64_less_unsigned_true\n\t"
2239 "jmp .Lamd64_less_unsigned_end\n\t"
2240 ".Lamd64_less_unsigned_true:\n\t"
2242 ".Lamd64_less_unsigned_end:\n\t"
2243 "lea 0x8(%rsp),%rsp");
2247 amd64_emit_ref (int size
)
2252 EMIT_ASM (amd64_ref1
,
2256 EMIT_ASM (amd64_ref2
,
2260 EMIT_ASM (amd64_ref4
,
2261 "movl (%rax),%eax");
2264 EMIT_ASM (amd64_ref8
,
2265 "movq (%rax),%rax");
2271 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2273 EMIT_ASM (amd64_if_goto
,
2277 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2285 amd64_emit_goto (int *offset_p
, int *size_p
)
2287 EMIT_ASM (amd64_goto
,
2288 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2296 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2298 int diff
= (to
- (from
+ size
));
2299 unsigned char buf
[sizeof (int)];
2307 memcpy (buf
, &diff
, sizeof (int));
2308 write_inferior_memory (from
, buf
, sizeof (int));
2312 amd64_emit_const (LONGEST num
)
2314 unsigned char buf
[16];
2316 CORE_ADDR buildaddr
= current_insn_ptr
;
2319 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2320 memcpy (&buf
[i
], &num
, sizeof (num
));
2322 append_insns (&buildaddr
, i
, buf
);
2323 current_insn_ptr
= buildaddr
;
2327 amd64_emit_call (CORE_ADDR fn
)
2329 unsigned char buf
[16];
2331 CORE_ADDR buildaddr
;
2334 /* The destination function being in the shared library, may be
2335 >31-bits away off the compiled code pad. */
2337 buildaddr
= current_insn_ptr
;
2339 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2343 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2345 /* Offset is too large for a call. Use callq, but that requires
2346 a register, so avoid it if possible. Use r10, since it is
2347 call-clobbered, we don't have to push/pop it. */
2348 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2350 memcpy (buf
+ i
, &fn
, 8);
2352 buf
[i
++] = 0xff; /* callq *%r10 */
2357 int offset32
= offset64
; /* we know we can't overflow here. */
2358 memcpy (buf
+ i
, &offset32
, 4);
2362 append_insns (&buildaddr
, i
, buf
);
2363 current_insn_ptr
= buildaddr
;
2367 amd64_emit_reg (int reg
)
2369 unsigned char buf
[16];
2371 CORE_ADDR buildaddr
;
2373 /* Assume raw_regs is still in %rdi. */
2374 buildaddr
= current_insn_ptr
;
2376 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2377 memcpy (&buf
[i
], ®
, sizeof (reg
));
2379 append_insns (&buildaddr
, i
, buf
);
2380 current_insn_ptr
= buildaddr
;
2381 amd64_emit_call (get_raw_reg_func_addr ());
2385 amd64_emit_pop (void)
2387 EMIT_ASM (amd64_pop
,
2392 amd64_emit_stack_flush (void)
2394 EMIT_ASM (amd64_stack_flush
,
2399 amd64_emit_zero_ext (int arg
)
2404 EMIT_ASM (amd64_zero_ext_8
,
2408 EMIT_ASM (amd64_zero_ext_16
,
2409 "and $0xffff,%rax");
2412 EMIT_ASM (amd64_zero_ext_32
,
2413 "mov $0xffffffff,%rcx\n\t"
2422 amd64_emit_swap (void)
2424 EMIT_ASM (amd64_swap
,
2431 amd64_emit_stack_adjust (int n
)
2433 unsigned char buf
[16];
2435 CORE_ADDR buildaddr
= current_insn_ptr
;
2438 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2442 /* This only handles adjustments up to 16, but we don't expect any more. */
2444 append_insns (&buildaddr
, i
, buf
);
2445 current_insn_ptr
= buildaddr
;
2448 /* FN's prototype is `LONGEST(*fn)(int)'. */
2451 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2453 unsigned char buf
[16];
2455 CORE_ADDR buildaddr
;
2457 buildaddr
= current_insn_ptr
;
2459 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2460 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2462 append_insns (&buildaddr
, i
, buf
);
2463 current_insn_ptr
= buildaddr
;
2464 amd64_emit_call (fn
);
2467 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2470 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2472 unsigned char buf
[16];
2474 CORE_ADDR buildaddr
;
2476 buildaddr
= current_insn_ptr
;
2478 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2479 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2481 append_insns (&buildaddr
, i
, buf
);
2482 current_insn_ptr
= buildaddr
;
2483 EMIT_ASM (amd64_void_call_2_a
,
2484 /* Save away a copy of the stack top. */
2486 /* Also pass top as the second argument. */
2488 amd64_emit_call (fn
);
2489 EMIT_ASM (amd64_void_call_2_b
,
2490 /* Restore the stack top, %rax may have been trashed. */
2495 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2498 "cmp %rax,(%rsp)\n\t"
2499 "jne .Lamd64_eq_fallthru\n\t"
2500 "lea 0x8(%rsp),%rsp\n\t"
2502 /* jmp, but don't trust the assembler to choose the right jump */
2503 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2504 ".Lamd64_eq_fallthru:\n\t"
2505 "lea 0x8(%rsp),%rsp\n\t"
2515 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2518 "cmp %rax,(%rsp)\n\t"
2519 "je .Lamd64_ne_fallthru\n\t"
2520 "lea 0x8(%rsp),%rsp\n\t"
2522 /* jmp, but don't trust the assembler to choose the right jump */
2523 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2524 ".Lamd64_ne_fallthru:\n\t"
2525 "lea 0x8(%rsp),%rsp\n\t"
2535 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2538 "cmp %rax,(%rsp)\n\t"
2539 "jnl .Lamd64_lt_fallthru\n\t"
2540 "lea 0x8(%rsp),%rsp\n\t"
2542 /* jmp, but don't trust the assembler to choose the right jump */
2543 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2544 ".Lamd64_lt_fallthru:\n\t"
2545 "lea 0x8(%rsp),%rsp\n\t"
2555 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2558 "cmp %rax,(%rsp)\n\t"
2559 "jnle .Lamd64_le_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_le_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2575 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2578 "cmp %rax,(%rsp)\n\t"
2579 "jng .Lamd64_gt_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_gt_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2595 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnge .Lamd64_ge_fallthru\n\t"
2600 ".Lamd64_ge_jump:\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2603 /* jmp, but don't trust the assembler to choose the right jump */
2604 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2605 ".Lamd64_ge_fallthru:\n\t"
2606 "lea 0x8(%rsp),%rsp\n\t"
2615 struct emit_ops amd64_emit_ops
=
2617 amd64_emit_prologue
,
2618 amd64_emit_epilogue
,
2623 amd64_emit_rsh_signed
,
2624 amd64_emit_rsh_unsigned
,
2632 amd64_emit_less_signed
,
2633 amd64_emit_less_unsigned
,
2637 amd64_write_goto_address
,
2642 amd64_emit_stack_flush
,
2643 amd64_emit_zero_ext
,
2645 amd64_emit_stack_adjust
,
2646 amd64_emit_int_call_1
,
2647 amd64_emit_void_call_2
,
2656 #endif /* __x86_64__ */
2659 i386_emit_prologue (void)
2661 EMIT_ASM32 (i386_prologue
,
2665 /* At this point, the raw regs base address is at 8(%ebp), and the
2666 value pointer is at 12(%ebp). */
2670 i386_emit_epilogue (void)
2672 EMIT_ASM32 (i386_epilogue
,
2673 "mov 12(%ebp),%ecx\n\t"
2674 "mov %eax,(%ecx)\n\t"
2675 "mov %ebx,0x4(%ecx)\n\t"
2683 i386_emit_add (void)
2685 EMIT_ASM32 (i386_add
,
2686 "add (%esp),%eax\n\t"
2687 "adc 0x4(%esp),%ebx\n\t"
2688 "lea 0x8(%esp),%esp");
2692 i386_emit_sub (void)
2694 EMIT_ASM32 (i386_sub
,
2695 "subl %eax,(%esp)\n\t"
2696 "sbbl %ebx,4(%esp)\n\t"
2702 i386_emit_mul (void)
2708 i386_emit_lsh (void)
2714 i386_emit_rsh_signed (void)
2720 i386_emit_rsh_unsigned (void)
2726 i386_emit_ext (int arg
)
2731 EMIT_ASM32 (i386_ext_8
,
2734 "movl %eax,%ebx\n\t"
2738 EMIT_ASM32 (i386_ext_16
,
2740 "movl %eax,%ebx\n\t"
2744 EMIT_ASM32 (i386_ext_32
,
2745 "movl %eax,%ebx\n\t"
2754 i386_emit_log_not (void)
2756 EMIT_ASM32 (i386_log_not
,
2758 "test %eax,%eax\n\t"
2765 i386_emit_bit_and (void)
2767 EMIT_ASM32 (i386_and
,
2768 "and (%esp),%eax\n\t"
2769 "and 0x4(%esp),%ebx\n\t"
2770 "lea 0x8(%esp),%esp");
2774 i386_emit_bit_or (void)
2776 EMIT_ASM32 (i386_or
,
2777 "or (%esp),%eax\n\t"
2778 "or 0x4(%esp),%ebx\n\t"
2779 "lea 0x8(%esp),%esp");
2783 i386_emit_bit_xor (void)
2785 EMIT_ASM32 (i386_xor
,
2786 "xor (%esp),%eax\n\t"
2787 "xor 0x4(%esp),%ebx\n\t"
2788 "lea 0x8(%esp),%esp");
2792 i386_emit_bit_not (void)
2794 EMIT_ASM32 (i386_bit_not
,
2795 "xor $0xffffffff,%eax\n\t"
2796 "xor $0xffffffff,%ebx\n\t");
2800 i386_emit_equal (void)
2802 EMIT_ASM32 (i386_equal
,
2803 "cmpl %ebx,4(%esp)\n\t"
2804 "jne .Li386_equal_false\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "je .Li386_equal_true\n\t"
2807 ".Li386_equal_false:\n\t"
2809 "jmp .Li386_equal_end\n\t"
2810 ".Li386_equal_true:\n\t"
2812 ".Li386_equal_end:\n\t"
2814 "lea 0x8(%esp),%esp");
2818 i386_emit_less_signed (void)
2820 EMIT_ASM32 (i386_less_signed
,
2821 "cmpl %ebx,4(%esp)\n\t"
2822 "jl .Li386_less_signed_true\n\t"
2823 "jne .Li386_less_signed_false\n\t"
2824 "cmpl %eax,(%esp)\n\t"
2825 "jl .Li386_less_signed_true\n\t"
2826 ".Li386_less_signed_false:\n\t"
2828 "jmp .Li386_less_signed_end\n\t"
2829 ".Li386_less_signed_true:\n\t"
2831 ".Li386_less_signed_end:\n\t"
2833 "lea 0x8(%esp),%esp");
2837 i386_emit_less_unsigned (void)
2839 EMIT_ASM32 (i386_less_unsigned
,
2840 "cmpl %ebx,4(%esp)\n\t"
2841 "jb .Li386_less_unsigned_true\n\t"
2842 "jne .Li386_less_unsigned_false\n\t"
2843 "cmpl %eax,(%esp)\n\t"
2844 "jb .Li386_less_unsigned_true\n\t"
2845 ".Li386_less_unsigned_false:\n\t"
2847 "jmp .Li386_less_unsigned_end\n\t"
2848 ".Li386_less_unsigned_true:\n\t"
2850 ".Li386_less_unsigned_end:\n\t"
2852 "lea 0x8(%esp),%esp");
2856 i386_emit_ref (int size
)
2861 EMIT_ASM32 (i386_ref1
,
2865 EMIT_ASM32 (i386_ref2
,
2869 EMIT_ASM32 (i386_ref4
,
2870 "movl (%eax),%eax");
2873 EMIT_ASM32 (i386_ref8
,
2874 "movl 4(%eax),%ebx\n\t"
2875 "movl (%eax),%eax");
2881 i386_emit_if_goto (int *offset_p
, int *size_p
)
2883 EMIT_ASM32 (i386_if_goto
,
2889 /* Don't trust the assembler to choose the right jump */
2890 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2893 *offset_p
= 11; /* be sure that this matches the sequence above */
2899 i386_emit_goto (int *offset_p
, int *size_p
)
2901 EMIT_ASM32 (i386_goto
,
2902 /* Don't trust the assembler to choose the right jump */
2903 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2911 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2913 int diff
= (to
- (from
+ size
));
2914 unsigned char buf
[sizeof (int)];
2916 /* We're only doing 4-byte sizes at the moment. */
2923 memcpy (buf
, &diff
, sizeof (int));
2924 write_inferior_memory (from
, buf
, sizeof (int));
2928 i386_emit_const (LONGEST num
)
2930 unsigned char buf
[16];
2932 CORE_ADDR buildaddr
= current_insn_ptr
;
2935 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2936 lo
= num
& 0xffffffff;
2937 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2939 hi
= ((num
>> 32) & 0xffffffff);
2942 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2943 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2948 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2950 append_insns (&buildaddr
, i
, buf
);
2951 current_insn_ptr
= buildaddr
;
2955 i386_emit_call (CORE_ADDR fn
)
2957 unsigned char buf
[16];
2959 CORE_ADDR buildaddr
;
2961 buildaddr
= current_insn_ptr
;
2963 buf
[i
++] = 0xe8; /* call <reladdr> */
2964 offset
= ((int) fn
) - (buildaddr
+ 5);
2965 memcpy (buf
+ 1, &offset
, 4);
2966 append_insns (&buildaddr
, 5, buf
);
2967 current_insn_ptr
= buildaddr
;
2971 i386_emit_reg (int reg
)
2973 unsigned char buf
[16];
2975 CORE_ADDR buildaddr
;
2977 EMIT_ASM32 (i386_reg_a
,
2979 buildaddr
= current_insn_ptr
;
2981 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2982 memcpy (&buf
[i
], ®
, sizeof (reg
));
2984 append_insns (&buildaddr
, i
, buf
);
2985 current_insn_ptr
= buildaddr
;
2986 EMIT_ASM32 (i386_reg_b
,
2987 "mov %eax,4(%esp)\n\t"
2988 "mov 8(%ebp),%eax\n\t"
2990 i386_emit_call (get_raw_reg_func_addr ());
2991 EMIT_ASM32 (i386_reg_c
,
2993 "lea 0x8(%esp),%esp");
2997 i386_emit_pop (void)
2999 EMIT_ASM32 (i386_pop
,
3005 i386_emit_stack_flush (void)
3007 EMIT_ASM32 (i386_stack_flush
,
3013 i386_emit_zero_ext (int arg
)
3018 EMIT_ASM32 (i386_zero_ext_8
,
3019 "and $0xff,%eax\n\t"
3023 EMIT_ASM32 (i386_zero_ext_16
,
3024 "and $0xffff,%eax\n\t"
3028 EMIT_ASM32 (i386_zero_ext_32
,
3037 i386_emit_swap (void)
3039 EMIT_ASM32 (i386_swap
,
3049 i386_emit_stack_adjust (int n
)
3051 unsigned char buf
[16];
3053 CORE_ADDR buildaddr
= current_insn_ptr
;
3056 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3060 append_insns (&buildaddr
, i
, buf
);
3061 current_insn_ptr
= buildaddr
;
3064 /* FN's prototype is `LONGEST(*fn)(int)'. */
3067 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3069 unsigned char buf
[16];
3071 CORE_ADDR buildaddr
;
3073 EMIT_ASM32 (i386_int_call_1_a
,
3074 /* Reserve a bit of stack space. */
3076 /* Put the one argument on the stack. */
3077 buildaddr
= current_insn_ptr
;
3079 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3082 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3084 append_insns (&buildaddr
, i
, buf
);
3085 current_insn_ptr
= buildaddr
;
3086 i386_emit_call (fn
);
3087 EMIT_ASM32 (i386_int_call_1_c
,
3089 "lea 0x8(%esp),%esp");
3092 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3095 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3097 unsigned char buf
[16];
3099 CORE_ADDR buildaddr
;
3101 EMIT_ASM32 (i386_void_call_2_a
,
3102 /* Preserve %eax only; we don't have to worry about %ebx. */
3104 /* Reserve a bit of stack space for arguments. */
3105 "sub $0x10,%esp\n\t"
3106 /* Copy "top" to the second argument position. (Note that
3107 we can't assume function won't scribble on its
3108 arguments, so don't try to restore from this.) */
3109 "mov %eax,4(%esp)\n\t"
3110 "mov %ebx,8(%esp)");
3111 /* Put the first argument on the stack. */
3112 buildaddr
= current_insn_ptr
;
3114 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3117 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3119 append_insns (&buildaddr
, i
, buf
);
3120 current_insn_ptr
= buildaddr
;
3121 i386_emit_call (fn
);
3122 EMIT_ASM32 (i386_void_call_2_b
,
3123 "lea 0x10(%esp),%esp\n\t"
3124 /* Restore original stack top. */
3130 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3133 /* Check low half first, more likely to be decider */
3134 "cmpl %eax,(%esp)\n\t"
3135 "jne .Leq_fallthru\n\t"
3136 "cmpl %ebx,4(%esp)\n\t"
3137 "jne .Leq_fallthru\n\t"
3138 "lea 0x8(%esp),%esp\n\t"
3141 /* jmp, but don't trust the assembler to choose the right jump */
3142 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3143 ".Leq_fallthru:\n\t"
3144 "lea 0x8(%esp),%esp\n\t"
3155 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3158 /* Check low half first, more likely to be decider */
3159 "cmpl %eax,(%esp)\n\t"
3161 "cmpl %ebx,4(%esp)\n\t"
3162 "je .Lne_fallthru\n\t"
3164 "lea 0x8(%esp),%esp\n\t"
3167 /* jmp, but don't trust the assembler to choose the right jump */
3168 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3169 ".Lne_fallthru:\n\t"
3170 "lea 0x8(%esp),%esp\n\t"
3181 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3184 "cmpl %ebx,4(%esp)\n\t"
3186 "jne .Llt_fallthru\n\t"
3187 "cmpl %eax,(%esp)\n\t"
3188 "jnl .Llt_fallthru\n\t"
3190 "lea 0x8(%esp),%esp\n\t"
3193 /* jmp, but don't trust the assembler to choose the right jump */
3194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3195 ".Llt_fallthru:\n\t"
3196 "lea 0x8(%esp),%esp\n\t"
3207 i386_emit_le_goto (int *offset_p
, int *size_p
)
3210 "cmpl %ebx,4(%esp)\n\t"
3212 "jne .Lle_fallthru\n\t"
3213 "cmpl %eax,(%esp)\n\t"
3214 "jnle .Lle_fallthru\n\t"
3216 "lea 0x8(%esp),%esp\n\t"
3219 /* jmp, but don't trust the assembler to choose the right jump */
3220 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3221 ".Lle_fallthru:\n\t"
3222 "lea 0x8(%esp),%esp\n\t"
3233 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3236 "cmpl %ebx,4(%esp)\n\t"
3238 "jne .Lgt_fallthru\n\t"
3239 "cmpl %eax,(%esp)\n\t"
3240 "jng .Lgt_fallthru\n\t"
3242 "lea 0x8(%esp),%esp\n\t"
3245 /* jmp, but don't trust the assembler to choose the right jump */
3246 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3247 ".Lgt_fallthru:\n\t"
3248 "lea 0x8(%esp),%esp\n\t"
3259 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3262 "cmpl %ebx,4(%esp)\n\t"
3264 "jne .Lge_fallthru\n\t"
3265 "cmpl %eax,(%esp)\n\t"
3266 "jnge .Lge_fallthru\n\t"
3268 "lea 0x8(%esp),%esp\n\t"
3271 /* jmp, but don't trust the assembler to choose the right jump */
3272 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3273 ".Lge_fallthru:\n\t"
3274 "lea 0x8(%esp),%esp\n\t"
3284 struct emit_ops i386_emit_ops
=
3292 i386_emit_rsh_signed
,
3293 i386_emit_rsh_unsigned
,
3301 i386_emit_less_signed
,
3302 i386_emit_less_unsigned
,
3306 i386_write_goto_address
,
3311 i386_emit_stack_flush
,
3314 i386_emit_stack_adjust
,
3315 i386_emit_int_call_1
,
3316 i386_emit_void_call_2
,
3326 static struct emit_ops
*
3330 if (is_64bit_tdesc ())
3331 return &amd64_emit_ops
;
3334 return &i386_emit_ops
;
/* Report whether this target supports the vCont;r (range stepping)
   packet.  x86 always does.  NOTE(review): the body was dropped from
   this garbled fragment; "return 1" restored — verify against
   upstream.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3343 /* This is initialized assuming an amd64 target.
3344 x86_arch_setup will correct it for i386 or amd64 targets. */
3346 struct linux_target_ops the_low_target
=
3349 x86_linux_regs_info
,
3350 x86_cannot_fetch_register
,
3351 x86_cannot_store_register
,
3352 NULL
, /* fetch_register */
3362 x86_stopped_by_watchpoint
,
3363 x86_stopped_data_address
,
3364 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3365 native i386 case (no registers smaller than an xfer unit), and are not
3366 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3369 /* need to fix up i386 siginfo if host is amd64 */
3371 x86_linux_new_process
,
3372 x86_linux_new_thread
,
3373 x86_linux_prepare_to_resume
,
3374 x86_linux_process_qsupported
,
3375 x86_supports_tracepoints
,
3376 x86_get_thread_area
,
3377 x86_install_fast_tracepoint_jump_pad
,
3379 x86_get_min_fast_tracepoint_insn_len
,
3380 x86_supports_range_stepping
,
3384 initialize_low_arch (void)
3386 /* Initialize the Linux target descriptions. */
3388 init_registers_amd64_linux ();
3389 init_registers_amd64_avx_linux ();
3390 init_registers_amd64_mpx_linux ();
3392 init_registers_x32_linux ();
3393 init_registers_x32_avx_linux ();
3395 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3396 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3397 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3399 init_registers_i386_linux ();
3400 init_registers_i386_mmx_linux ();
3401 init_registers_i386_avx_linux ();
3402 init_registers_i386_mpx_linux ();
3404 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3405 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3406 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3408 initialize_regsets_info (&x86_regsets_info
);