1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
33 /* Defined in auto-generated file i386-linux.c. */
34 void init_registers_i386_linux (void);
35 /* Defined in auto-generated file amd64-linux.c. */
36 void init_registers_amd64_linux (void);
37 /* Defined in auto-generated file i386-avx-linux.c. */
38 void init_registers_i386_avx_linux (void);
39 /* Defined in auto-generated file amd64-avx-linux.c. */
40 void init_registers_amd64_avx_linux (void);
41 /* Defined in auto-generated file i386-mmx-linux.c. */
42 void init_registers_i386_mmx_linux (void);
44 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
46 /* Backward compatibility for gdb without XML support. */
48 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
49 <architecture>i386</architecture>\
50 <osabi>GNU/Linux</osabi>\
54 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
55 <architecture>i386:x86-64</architecture>\
56 <osabi>GNU/Linux</osabi>\
61 #include <sys/procfs.h>
62 #include <sys/ptrace.h>
65 #ifndef PTRACE_GETREGSET
66 #define PTRACE_GETREGSET 0x4204
69 #ifndef PTRACE_SETREGSET
70 #define PTRACE_SETREGSET 0x4205
74 #ifndef PTRACE_GET_THREAD_AREA
75 #define PTRACE_GET_THREAD_AREA 25
78 /* This definition comes from prctl.h, but some kernels may not have it. */
79 #ifndef PTRACE_ARCH_PRCTL
80 #define PTRACE_ARCH_PRCTL 30
83 /* The following definitions come from prctl.h, but may be absent
84 for certain configurations. */
86 #define ARCH_SET_GS 0x1001
87 #define ARCH_SET_FS 0x1002
88 #define ARCH_GET_FS 0x1003
89 #define ARCH_GET_GS 0x1004
92 /* Per-process arch-specific data we want to keep. */
94 struct arch_process_info
96 struct i386_debug_reg_state debug_reg_state
;
99 /* Per-thread arch-specific data we want to keep. */
103 /* Non-zero if our copy differs from what's recorded in the thread. */
104 int debug_registers_changed
;
109 /* Mapping between the general-purpose registers in `struct user'
110 format and GDB's register array layout.
111 Note that the transfer layout uses 64-bit regs. */
112 static /*const*/ int i386_regmap
[] =
114 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
115 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
116 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
117 DS
* 8, ES
* 8, FS
* 8, GS
* 8
120 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
122 /* So code below doesn't have to care, i386 or amd64. */
123 #define ORIG_EAX ORIG_RAX
125 static const int x86_64_regmap
[] =
127 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
128 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
129 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
130 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
131 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
132 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1, -1,
140 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
142 #else /* ! __x86_64__ */
144 /* Mapping between the general-purpose registers in `struct user'
145 format and GDB's register array layout. */
146 static /*const*/ int i386_regmap
[] =
148 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
149 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
150 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
151 DS
* 4, ES
* 4, FS
* 4, GS
* 4
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
158 /* Called by libthread_db. */
161 ps_get_thread_area (const struct ps_prochandle
*ph
,
162 lwpid_t lwpid
, int idx
, void **base
)
165 int use_64bit
= register_size (0) == 8;
172 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
176 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
187 unsigned int desc
[4];
189 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
190 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
193 *(int *)base
= desc
[1];
198 /* Get the thread area address. This is used to recognize which
199 thread is which when tracing with the in-process agent library. We
200 don't read anything from the address, and treat it as opaque; it's
201 the address itself that we assume is unique per-thread. */
204 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
207 int use_64bit
= register_size (0) == 8;
212 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
214 *addr
= (CORE_ADDR
) (uintptr_t) base
;
223 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
224 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
225 unsigned int desc
[4];
227 const int reg_thread_area
= 3; /* bits to scale down register value. */
230 collect_register_by_name (regcache
, "gs", &gs
);
232 idx
= gs
>> reg_thread_area
;
234 if (ptrace (PTRACE_GET_THREAD_AREA
,
235 lwpid_of (lwp
), (void *) (long) idx
, (unsigned long) &desc
) < 0)
246 i386_cannot_store_register (int regno
)
248 return regno
>= I386_NUM_REGS
;
252 i386_cannot_fetch_register (int regno
)
254 return regno
>= I386_NUM_REGS
;
258 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
263 if (register_size (0) == 8)
265 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
266 if (x86_64_regmap
[i
] != -1)
267 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
272 for (i
= 0; i
< I386_NUM_REGS
; i
++)
273 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
275 collect_register_by_name (regcache
, "orig_eax",
276 ((char *) buf
) + ORIG_EAX
* 4);
280 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
285 if (register_size (0) == 8)
287 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
288 if (x86_64_regmap
[i
] != -1)
289 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
294 for (i
= 0; i
< I386_NUM_REGS
; i
++)
295 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
297 supply_register_by_name (regcache
, "orig_eax",
298 ((char *) buf
) + ORIG_EAX
* 4);
/* Fill the ptrace FP regset buffer BUF from REGCACHE.  On amd64 the
   FP regset is fxsave-format; on i386 it is the older fsave format.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Store the ptrace FP regset buffer BUF into REGCACHE.  Mirror of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Fill the ptrace FPX (fxsave-format) regset buffer BUF from
   REGCACHE.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Store the ptrace FPX (fxsave-format) regset buffer BUF into
   REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Fill the ptrace xstate (xsave-format) regset buffer BUF from
   REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Store the ptrace xstate (xsave-format) regset buffer BUF into
   REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
349 /* ??? The non-biarch i386 case stores all the i387 regs twice.
350 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
351 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
352 doesn't work. IWBN to avoid the duplication in the case where it
353 does work. Maybe the arch_setup routine could check whether it works
354 and update target_regsets accordingly, maybe by moving target_regsets
355 to linux_target_ops and set the right one there, rather than having to
356 modify the target_regsets global. */
358 struct regset_info target_regsets
[] =
360 #ifdef HAVE_PTRACE_GETREGS
361 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
363 x86_fill_gregset
, x86_store_gregset
},
364 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
365 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
367 # ifdef HAVE_PTRACE_GETFPXREGS
368 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
370 x86_fill_fpxregset
, x86_store_fpxregset
},
373 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
375 x86_fill_fpregset
, x86_store_fpregset
},
376 #endif /* HAVE_PTRACE_GETREGS */
377 { 0, 0, 0, -1, -1, NULL
, NULL
}
381 x86_get_pc (struct regcache
*regcache
)
383 int use_64bit
= register_size (0) == 8;
388 collect_register_by_name (regcache
, "rip", &pc
);
389 return (CORE_ADDR
) pc
;
394 collect_register_by_name (regcache
, "eip", &pc
);
395 return (CORE_ADDR
) pc
;
400 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
402 int use_64bit
= register_size (0) == 8;
406 unsigned long newpc
= pc
;
407 supply_register_by_name (regcache
, "rip", &newpc
);
411 unsigned int newpc
= pc
;
412 supply_register_by_name (regcache
, "eip", &newpc
);
416 static const unsigned char x86_breakpoint
[] = { 0xCC };
417 #define x86_breakpoint_len 1
420 x86_breakpoint_at (CORE_ADDR pc
)
424 (*the_target
->read_memory
) (pc
, &c
, 1);
431 /* Support for debug registers. */
434 x86_linux_dr_get (ptid_t ptid
, int regnum
)
439 tid
= ptid_get_lwp (ptid
);
442 value
= ptrace (PTRACE_PEEKUSER
, tid
,
443 offsetof (struct user
, u_debugreg
[regnum
]), 0);
445 error ("Couldn't read debug register");
451 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
455 tid
= ptid_get_lwp (ptid
);
458 ptrace (PTRACE_POKEUSER
, tid
,
459 offsetof (struct user
, u_debugreg
[regnum
]), value
);
461 error ("Couldn't write debug register");
465 update_debug_registers_callback (struct inferior_list_entry
*entry
,
468 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
469 int pid
= *(int *) pid_p
;
471 /* Only update the threads of this process. */
472 if (pid_of (lwp
) == pid
)
474 /* The actual update is done later just before resuming the lwp,
475 we just mark that the registers need updating. */
476 lwp
->arch_private
->debug_registers_changed
= 1;
478 /* If the lwp isn't stopped, force it to momentarily pause, so
479 we can update its debug registers. */
481 linux_stop_lwp (lwp
);
487 /* Update the inferior's debug register REGNUM from STATE. */
490 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
492 /* Only update the threads of this process. */
493 int pid
= pid_of (get_thread_lwp (current_inferior
));
495 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
496 fatal ("Invalid debug register %d", regnum
);
498 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
501 /* Return the inferior's debug register REGNUM. */
504 i386_dr_low_get_addr (int regnum
)
506 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
507 ptid_t ptid
= ptid_of (lwp
);
509 /* DR6 and DR7 are retrieved with some other way. */
510 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
< DR_LASTADDR
);
512 return x86_linux_dr_get (ptid
, regnum
);
515 /* Update the inferior's DR7 debug control register from STATE. */
518 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
520 /* Only update the threads of this process. */
521 int pid
= pid_of (get_thread_lwp (current_inferior
));
523 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
526 /* Return the inferior's DR7 debug control register. */
529 i386_dr_low_get_control (void)
531 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
532 ptid_t ptid
= ptid_of (lwp
);
534 return x86_linux_dr_get (ptid
, DR_CONTROL
);
537 /* Get the value of the DR6 debug status register from the inferior
538 and record it in STATE. */
541 i386_dr_low_get_status (void)
543 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
544 ptid_t ptid
= ptid_of (lwp
);
546 return x86_linux_dr_get (ptid
, DR_STATUS
);
549 /* Breakpoint/Watchpoint support. */
552 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
554 struct process_info
*proc
= current_process ();
561 ret
= prepare_to_access_memory ();
564 ret
= set_gdb_breakpoint_at (addr
);
565 done_accessing_memory ();
571 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
580 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
582 struct process_info
*proc
= current_process ();
589 ret
= prepare_to_access_memory ();
592 ret
= delete_gdb_breakpoint_at (addr
);
593 done_accessing_memory ();
599 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
608 x86_stopped_by_watchpoint (void)
610 struct process_info
*proc
= current_process ();
611 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
615 x86_stopped_data_address (void)
617 struct process_info
*proc
= current_process ();
619 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
625 /* Called when a new process is created. */
627 static struct arch_process_info
*
628 x86_linux_new_process (void)
630 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
632 i386_low_init_dregs (&info
->debug_reg_state
);
637 /* Called when a new thread is detected. */
639 static struct arch_lwp_info
*
640 x86_linux_new_thread (void)
642 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
644 info
->debug_registers_changed
= 1;
649 /* Called when resuming a thread.
650 If the debug regs have changed, update the thread's copies. */
653 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
655 ptid_t ptid
= ptid_of (lwp
);
657 if (lwp
->arch_private
->debug_registers_changed
)
660 int pid
= ptid_get_pid (ptid
);
661 struct process_info
*proc
= find_process_pid (pid
);
662 struct i386_debug_reg_state
*state
= &proc
->private->arch_private
->debug_reg_state
;
664 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
665 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
667 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
669 lwp
->arch_private
->debug_registers_changed
= 0;
672 if (lwp
->stopped_by_watchpoint
)
673 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
676 /* When GDBSERVER is built as a 64-bit application on linux, the
677 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
678 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
679 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
680 conversion in-place ourselves. */
682 /* These types below (compat_*) define a siginfo type that is layout
683 compatible with the siginfo type exported by the 32-bit userspace
688 typedef int compat_int_t
;
689 typedef unsigned int compat_uptr_t
;
691 typedef int compat_time_t
;
692 typedef int compat_timer_t
;
693 typedef int compat_clock_t
;
695 struct compat_timeval
697 compat_time_t tv_sec
;
701 typedef union compat_sigval
703 compat_int_t sival_int
;
704 compat_uptr_t sival_ptr
;
707 typedef struct compat_siginfo
715 int _pad
[((128 / sizeof (int)) - 3)];
724 /* POSIX.1b timers */
729 compat_sigval_t _sigval
;
732 /* POSIX.1b signals */
737 compat_sigval_t _sigval
;
746 compat_clock_t _utime
;
747 compat_clock_t _stime
;
750 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
765 #define cpt_si_pid _sifields._kill._pid
766 #define cpt_si_uid _sifields._kill._uid
767 #define cpt_si_timerid _sifields._timer._tid
768 #define cpt_si_overrun _sifields._timer._overrun
769 #define cpt_si_status _sifields._sigchld._status
770 #define cpt_si_utime _sifields._sigchld._utime
771 #define cpt_si_stime _sifields._sigchld._stime
772 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
773 #define cpt_si_addr _sifields._sigfault._addr
774 #define cpt_si_band _sifields._sigpoll._band
775 #define cpt_si_fd _sifields._sigpoll._fd
777 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
778 In their place is si_timer1,si_timer2. */
780 #define si_timerid si_timer1
783 #define si_overrun si_timer2
787 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
789 memset (to
, 0, sizeof (*to
));
791 to
->si_signo
= from
->si_signo
;
792 to
->si_errno
= from
->si_errno
;
793 to
->si_code
= from
->si_code
;
797 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
799 else if (to
->si_code
== SI_USER
)
801 to
->cpt_si_pid
= from
->si_pid
;
802 to
->cpt_si_uid
= from
->si_uid
;
804 else if (to
->si_code
== SI_TIMER
)
806 to
->cpt_si_timerid
= from
->si_timerid
;
807 to
->cpt_si_overrun
= from
->si_overrun
;
808 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
812 switch (to
->si_signo
)
815 to
->cpt_si_pid
= from
->si_pid
;
816 to
->cpt_si_uid
= from
->si_uid
;
817 to
->cpt_si_status
= from
->si_status
;
818 to
->cpt_si_utime
= from
->si_utime
;
819 to
->cpt_si_stime
= from
->si_stime
;
825 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
828 to
->cpt_si_band
= from
->si_band
;
829 to
->cpt_si_fd
= from
->si_fd
;
832 to
->cpt_si_pid
= from
->si_pid
;
833 to
->cpt_si_uid
= from
->si_uid
;
834 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
841 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
843 memset (to
, 0, sizeof (*to
));
845 to
->si_signo
= from
->si_signo
;
846 to
->si_errno
= from
->si_errno
;
847 to
->si_code
= from
->si_code
;
851 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
853 else if (to
->si_code
== SI_USER
)
855 to
->si_pid
= from
->cpt_si_pid
;
856 to
->si_uid
= from
->cpt_si_uid
;
858 else if (to
->si_code
== SI_TIMER
)
860 to
->si_timerid
= from
->cpt_si_timerid
;
861 to
->si_overrun
= from
->cpt_si_overrun
;
862 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
866 switch (to
->si_signo
)
869 to
->si_pid
= from
->cpt_si_pid
;
870 to
->si_uid
= from
->cpt_si_uid
;
871 to
->si_status
= from
->cpt_si_status
;
872 to
->si_utime
= from
->cpt_si_utime
;
873 to
->si_stime
= from
->cpt_si_stime
;
879 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
882 to
->si_band
= from
->cpt_si_band
;
883 to
->si_fd
= from
->cpt_si_fd
;
886 to
->si_pid
= from
->cpt_si_pid
;
887 to
->si_uid
= from
->cpt_si_uid
;
888 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
894 #endif /* __x86_64__ */
896 /* Convert a native/host siginfo object, into/from the siginfo in the
897 layout of the inferiors' architecture. Returns true if any
898 conversion was done; false otherwise. If DIRECTION is 1, then copy
899 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
903 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
906 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
907 if (register_size (0) == 4)
909 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
910 fatal ("unexpected difference in siginfo");
913 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
915 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
926 /* Update gdbserver_xmltarget. */
929 x86_linux_update_xmltarget (void)
932 struct regset_info
*regset
;
933 static unsigned long long xcr0
;
934 static int have_ptrace_getregset
= -1;
935 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
936 static int have_ptrace_getfpxregs
= -1;
939 if (!current_inferior
)
942 /* Before changing the register cache internal layout or the target
943 regsets, flush the contents of the current valid caches back to
945 regcache_invalidate ();
947 pid
= pid_of (get_thread_lwp (current_inferior
));
949 if (num_xmm_registers
== 8)
950 init_registers_i386_linux ();
952 init_registers_amd64_linux ();
955 # ifdef HAVE_PTRACE_GETFPXREGS
956 if (have_ptrace_getfpxregs
== -1)
958 elf_fpxregset_t fpxregs
;
960 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
962 have_ptrace_getfpxregs
= 0;
963 x86_xcr0
= I386_XSTATE_X87_MASK
;
965 /* Disable PTRACE_GETFPXREGS. */
966 for (regset
= target_regsets
;
967 regset
->fill_function
!= NULL
; regset
++)
968 if (regset
->get_request
== PTRACE_GETFPXREGS
)
975 have_ptrace_getfpxregs
= 1;
978 if (!have_ptrace_getfpxregs
)
980 init_registers_i386_mmx_linux ();
984 init_registers_i386_linux ();
992 if (num_xmm_registers
== 8)
993 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
995 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
997 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1000 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1005 /* Check if XSAVE extended state is supported. */
1006 if (have_ptrace_getregset
== -1)
1008 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1011 iov
.iov_base
= xstateregs
;
1012 iov
.iov_len
= sizeof (xstateregs
);
1014 /* Check if PTRACE_GETREGSET works. */
1015 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1018 have_ptrace_getregset
= 0;
1022 have_ptrace_getregset
= 1;
1024 /* Get XCR0 from XSAVE extended state at byte 464. */
1025 xcr0
= xstateregs
[464 / sizeof (long long)];
1027 /* Use PTRACE_GETREGSET if it is available. */
1028 for (regset
= target_regsets
;
1029 regset
->fill_function
!= NULL
; regset
++)
1030 if (regset
->get_request
== PTRACE_GETREGSET
)
1031 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1032 else if (regset
->type
!= GENERAL_REGS
)
1036 if (have_ptrace_getregset
)
1038 /* AVX is the highest feature we support. */
1039 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1044 /* I386 has 8 xmm regs. */
1045 if (num_xmm_registers
== 8)
1046 init_registers_i386_avx_linux ();
1048 init_registers_amd64_avx_linux ();
1050 init_registers_i386_avx_linux ();
1056 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1057 PTRACE_GETREGSET. */
1060 x86_linux_process_qsupported (const char *query
)
1062 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1063 with "i386" in qSupported query, it supports x86 XML target
1066 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1068 char *copy
= xstrdup (query
+ 13);
1071 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1073 if (strcmp (p
, "i386") == 0)
1083 x86_linux_update_xmltarget ();
1086 /* Initialize gdbserver for the architecture of the inferior. */
1089 x86_arch_setup (void)
1092 int pid
= pid_of (get_thread_lwp (current_inferior
));
1093 char *file
= linux_child_pid_to_exec_file (pid
);
1094 int use_64bit
= elf_64_file_p (file
);
1100 /* This can only happen if /proc/<pid>/exe is unreadable,
1101 but "that can't happen" if we've gotten this far.
1102 Fall through and assume this is a 32-bit program. */
1106 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1107 the_low_target
.num_regs
= -1;
1108 the_low_target
.regmap
= NULL
;
1109 the_low_target
.cannot_fetch_register
= NULL
;
1110 the_low_target
.cannot_store_register
= NULL
;
1112 /* Amd64 has 16 xmm regs. */
1113 num_xmm_registers
= 16;
1115 x86_linux_update_xmltarget ();
1120 /* Ok we have a 32-bit inferior. */
1122 the_low_target
.num_regs
= I386_NUM_REGS
;
1123 the_low_target
.regmap
= i386_regmap
;
1124 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1125 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1127 /* I386 has 8 xmm regs. */
1128 num_xmm_registers
= 8;
1130 x86_linux_update_xmltarget ();
1134 x86_supports_tracepoints (void)
1140 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1142 write_inferior_memory (*to
, buf
, len
);
1147 push_opcode (unsigned char *buf
, char *op
)
1149 unsigned char *buf_org
= buf
;
1154 unsigned long ul
= strtoul (op
, &endptr
, 16);
1163 return buf
- buf_org
;
1168 /* Build a jump pad that saves registers and calls a collection
1169 function. Writes a jump instruction to the jump pad to
1170 JJUMPAD_INSN. The caller is responsible to write it in at the
1171 tracepoint address. */
1174 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1175 CORE_ADDR collector
,
1178 CORE_ADDR
*jump_entry
,
1179 unsigned char *jjump_pad_insn
,
1180 ULONGEST
*jjump_pad_insn_size
,
1181 CORE_ADDR
*adjusted_insn_addr
,
1182 CORE_ADDR
*adjusted_insn_addr_end
)
1184 unsigned char buf
[40];
1186 CORE_ADDR buildaddr
= *jump_entry
;
1188 /* Build the jump pad. */
1190 /* First, do tracepoint data collection. Save registers. */
1192 /* Need to ensure stack pointer saved first. */
1193 buf
[i
++] = 0x54; /* push %rsp */
1194 buf
[i
++] = 0x55; /* push %rbp */
1195 buf
[i
++] = 0x57; /* push %rdi */
1196 buf
[i
++] = 0x56; /* push %rsi */
1197 buf
[i
++] = 0x52; /* push %rdx */
1198 buf
[i
++] = 0x51; /* push %rcx */
1199 buf
[i
++] = 0x53; /* push %rbx */
1200 buf
[i
++] = 0x50; /* push %rax */
1201 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1202 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1203 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1204 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1205 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1206 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1207 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1208 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1209 buf
[i
++] = 0x9c; /* pushfq */
1210 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1212 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1213 i
+= sizeof (unsigned long);
1214 buf
[i
++] = 0x57; /* push %rdi */
1215 append_insns (&buildaddr
, i
, buf
);
1217 /* Stack space for the collecting_t object. */
1219 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1220 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1221 memcpy (buf
+ i
, &tpoint
, 8);
1223 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1224 i
+= push_opcode (&buf
[i
],
1225 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1226 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1227 append_insns (&buildaddr
, i
, buf
);
1231 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1232 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1234 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1235 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1236 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1237 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1238 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1239 append_insns (&buildaddr
, i
, buf
);
1241 /* Set up the gdb_collect call. */
1242 /* At this point, (stack pointer + 0x18) is the base of our saved
1246 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1247 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1249 /* tpoint address may be 64-bit wide. */
1250 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1251 memcpy (buf
+ i
, &tpoint
, 8);
1253 append_insns (&buildaddr
, i
, buf
);
1255 /* The collector function being in the shared library, may be
1256 >31-bits away off the jump pad. */
1258 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1259 memcpy (buf
+ i
, &collector
, 8);
1261 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1262 append_insns (&buildaddr
, i
, buf
);
1264 /* Clear the spin-lock. */
1266 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1267 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1268 memcpy (buf
+ i
, &lockaddr
, 8);
1270 append_insns (&buildaddr
, i
, buf
);
1272 /* Remove stack that had been used for the collect_t object. */
1274 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1275 append_insns (&buildaddr
, i
, buf
);
1277 /* Restore register state. */
1279 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1283 buf
[i
++] = 0x9d; /* popfq */
1284 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1285 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1286 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1287 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1288 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1289 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1290 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1291 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1292 buf
[i
++] = 0x58; /* pop %rax */
1293 buf
[i
++] = 0x5b; /* pop %rbx */
1294 buf
[i
++] = 0x59; /* pop %rcx */
1295 buf
[i
++] = 0x5a; /* pop %rdx */
1296 buf
[i
++] = 0x5e; /* pop %rsi */
1297 buf
[i
++] = 0x5f; /* pop %rdi */
1298 buf
[i
++] = 0x5d; /* pop %rbp */
1299 buf
[i
++] = 0x5c; /* pop %rsp */
1300 append_insns (&buildaddr
, i
, buf
);
1302 /* Now, adjust the original instruction to execute in the jump
1304 *adjusted_insn_addr
= buildaddr
;
1305 relocate_instruction (&buildaddr
, tpaddr
);
1306 *adjusted_insn_addr_end
= buildaddr
;
1308 /* Finally, write a jump back to the program. */
1309 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1310 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1311 memcpy (buf
+ 1, &offset
, 4);
1312 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1314 /* The jump pad is now built. Wire in a jump to our jump pad. This
1315 is always done last (by our caller actually), so that we can
1316 install fast tracepoints with threads running. This relies on
1317 the agent's atomic write support. */
1318 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1319 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1320 memcpy (buf
+ 1, &offset
, 4);
1321 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1322 *jjump_pad_insn_size
= sizeof (jump_insn
);
1324 /* Return the end address of our pad. */
1325 *jump_entry
= buildaddr
;
1330 #endif /* __x86_64__ */
1332 /* Build a jump pad that saves registers and calls a collection
1333 function. Writes a jump instruction to the jump pad to
1334 JJUMPAD_INSN. The caller is responsible to write it in at the
1335 tracepoint address. */
1338 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1339 CORE_ADDR collector
,
1342 CORE_ADDR
*jump_entry
,
1343 unsigned char *jjump_pad_insn
,
1344 ULONGEST
*jjump_pad_insn_size
,
1345 CORE_ADDR
*adjusted_insn_addr
,
1346 CORE_ADDR
*adjusted_insn_addr_end
)
1348 unsigned char buf
[0x100];
1350 CORE_ADDR buildaddr
= *jump_entry
;
1352 /* Build the jump pad. */
1354 /* First, do tracepoint data collection. Save registers. */
1356 buf
[i
++] = 0x60; /* pushad */
1357 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1358 *((int *)(buf
+ i
)) = (int) tpaddr
;
1360 buf
[i
++] = 0x9c; /* pushf */
1361 buf
[i
++] = 0x1e; /* push %ds */
1362 buf
[i
++] = 0x06; /* push %es */
1363 buf
[i
++] = 0x0f; /* push %fs */
1365 buf
[i
++] = 0x0f; /* push %gs */
1367 buf
[i
++] = 0x16; /* push %ss */
1368 buf
[i
++] = 0x0e; /* push %cs */
1369 append_insns (&buildaddr
, i
, buf
);
1371 /* Stack space for the collecting_t object. */
1373 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1375 /* Build the object. */
1376 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1377 memcpy (buf
+ i
, &tpoint
, 4);
1379 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1381 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1382 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1383 append_insns (&buildaddr
, i
, buf
);
1385 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1386 If we cared for it, this could be using xchg alternatively. */
1389 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1390 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1392 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1394 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1395 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1396 append_insns (&buildaddr
, i
, buf
);
1399 /* Set up arguments to the gdb_collect call. */
1401 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1402 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1403 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1404 append_insns (&buildaddr
, i
, buf
);
1407 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1408 append_insns (&buildaddr
, i
, buf
);
1411 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1412 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1414 append_insns (&buildaddr
, i
, buf
);
1416 buf
[0] = 0xe8; /* call <reladdr> */
1417 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1418 memcpy (buf
+ 1, &offset
, 4);
1419 append_insns (&buildaddr
, 5, buf
);
1420 /* Clean up after the call. */
1421 buf
[0] = 0x83; /* add $0x8,%esp */
1424 append_insns (&buildaddr
, 3, buf
);
1427 /* Clear the spin-lock. This would need the LOCK prefix on older
1430 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1431 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1432 memcpy (buf
+ i
, &lockaddr
, 4);
1434 append_insns (&buildaddr
, i
, buf
);
1437 /* Remove stack that had been used for the collect_t object. */
1439 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1440 append_insns (&buildaddr
, i
, buf
);
1443 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1446 buf
[i
++] = 0x17; /* pop %ss */
1447 buf
[i
++] = 0x0f; /* pop %gs */
1449 buf
[i
++] = 0x0f; /* pop %fs */
1451 buf
[i
++] = 0x07; /* pop %es */
1452 buf
[i
++] = 0x1f; /* pop %de */
1453 buf
[i
++] = 0x9d; /* popf */
1454 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1457 buf
[i
++] = 0x61; /* popad */
1458 append_insns (&buildaddr
, i
, buf
);
1460 /* Now, adjust the original instruction to execute in the jump
1462 *adjusted_insn_addr
= buildaddr
;
1463 relocate_instruction (&buildaddr
, tpaddr
);
1464 *adjusted_insn_addr_end
= buildaddr
;
1466 /* Write the jump back to the program. */
1467 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1468 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1469 memcpy (buf
+ 1, &offset
, 4);
1470 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1472 /* The jump pad is now built. Wire in a jump to our jump pad. This
1473 is always done last (by our caller actually), so that we can
1474 install fast tracepoints with threads running. This relies on
1475 the agent's atomic write support. */
1476 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1477 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1478 memcpy (buf
+ 1, &offset
, 4);
1479 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1480 *jjump_pad_insn_size
= sizeof (jump_insn
);
1482 /* Return the end address of our pad. */
1483 *jump_entry
= buildaddr
;
/* Dispatch jump-pad installation to the word-size-appropriate builder.
   All arguments are forwarded unchanged; see the i386/amd64 builders
   above for their meaning.  Returns 0 on success, nonzero on error.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  /* A register size of 8 bytes means the inferior is 64-bit.  */
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end);
}
1520 add_insns (unsigned char *start
, int len
)
1522 CORE_ADDR buildaddr
= current_insn_ptr
;
1525 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1526 len
, paddress (buildaddr
));
1528 append_insns (&buildaddr
, len
, start
);
1529 current_insn_ptr
= buildaddr
;
1532 /* Our general strategy for emitting code is to avoid specifying raw
1533 bytes whenever possible, and instead copy a block of inline asm
1534 that is embedded in the function. This is a little messy, because
1535 we need to keep the compiler from discarding what looks like dead
1536 code, plus suppress various warnings. */
/* Copy the inline-asm template INSNS (delimited by the local labels
   start_NAME/end_NAME) into the compiled bytecode buffer.  The jmp
   keeps the host from executing the template in place; the extern
   label declarations keep the compiler from discarding it.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
#ifdef __x86_64__

/* 32-bit variant of EMIT_ASM for a biarch (amd64 host, i386 inferior)
   build: the template is assembled as 32-bit code, then the assembler
   is switched back to 64-bit mode.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a native i386 build 32-bit emission is just EMIT_ASM.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
/* Emit the bytecode-function prologue: set up a frame and spill the
   two incoming arguments (raw-regs base in %rdi, value pointer in
   %rsi) to known frame slots.  */
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
/* Emit the epilogue: store the top-of-stack value (%rax) through the
   saved value pointer, return 0, and tear down the frame.  */
static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "mov $0,%eax\n\t"
	    "leave\n\t"
	    "ret");
}
/* Emit: pop the next stack slot and add it into the top value (%rax).  */
static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit: subtract the top value (%rax) from the next slot, result
   becomes the new top in %rax.  */
static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
/* Multiply is not implemented by this JIT; flag the bytecode as
   uncompilable so the interpreter is used instead.  */
static void
amd64_emit_mul (void)
{
  emit_error = 1;
}
/* Left shift is not implemented by this JIT; flag an error.  */
static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}
/* Arithmetic right shift is not implemented by this JIT; flag an error.  */
static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}
/* Logical right shift is not implemented by this JIT; flag an error.  */
static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
/* Emit sign-extension of the top value from ARG bits (8/16/32) to the
   full 64-bit width; any other width is an error.  */
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
/* Emit logical NOT: top becomes 1 if it was zero, else 0.  */
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}
/* Emit bitwise AND of the two top stack values.  */
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit bitwise OR of the two top stack values.  */
static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit bitwise XOR of the two top stack values.  */
static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit bitwise NOT of the top value (xor with all-ones; the imm32 -1
   is sign-extended to 64 bits by the assembler).  */
static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
/* Emit equality comparison of the two top values; result (0/1) is the
   new top, and one slot is popped.  */
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit signed "next-slot < top" comparison; result (0/1) becomes the
   new top, and one slot is popped.  */
static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit unsigned "next-slot < top" comparison; result (0/1) becomes
   the new top, and one slot is popped.  */
static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Emit a memory dereference of the address in %rax, loading SIZE
   (1/2/4/8) bytes; narrower loads leave the upper bits of %rax as the
   partial-register write leaves them.  */
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
/* Emit a conditional branch: pop the condition, jump if nonzero.  The
   jne is emitted as raw bytes with a zero displacement; the caller
   patches it later via amd64_write_goto_address, using *OFFSET_P as
   the displacement's byte offset and *SIZE_P as its width.  */
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
/* Emit an unconditional jmp with a zero displacement to be patched
   later; report displacement offset/size through OFFSET_P/SIZE_P.  */
static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
/* Patch a previously emitted branch at FROM (displacement of SIZE
   bytes) to target TO.  Only 4-byte displacements are supported.  */
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  /* Relative displacement is computed from the end of the field.  */
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit "mov $NUM,%rax", loading a 64-bit constant as the new top.  */
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  *((LONGEST *) (&buf[i])) = num;
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit a call to FN.  Uses a 5-byte rel32 call when FN is within
   +/-2GB of the pad, else an absolute callq through %r10.  */
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit a fetch of raw register REG: load the register number into
   %esi (second argument) and call the get_raw_reg helper.  */
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
/* Emit: discard the cached top and reload it from the stack.  */
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
/* Emit: spill the cached top (%rax) onto the stack.  */
static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
/* Emit zero-extension of the top value from ARG bits (8/16/32); any
   other width is an error.  */
static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* A 32-bit mask won't fit in an and-immediate, so go via %rcx.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
/* Emit: exchange the two top stack values.  */
static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
/* Emit a stack adjustment discarding N 8-byte slots (lea n*8(%rsp),%rsp).  */
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit: load ARG1 into %edi
   (first argument) and call FN; the result lands in %rax, i.e. the
   cached top of stack.  */
static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit: pass ARG1 in
   %edi and the current top of stack in %rsi, preserving the top
   across the call.  */
static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
/* Table of 64-bit code-generation callbacks used by the agent
   expression JIT when the inferior is amd64.  Field order must match
   struct emit_ops in the gdbserver headers.  */
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2
  };

#endif /* __x86_64__ */
/* Emit the i386 bytecode-function prologue: set up a frame and save
   %ebx (used as the high half of the 64-bit top of stack).  */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	    "push %ebp\n\t"
	    "mov %esp,%ebp\n\t"
	    "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
/* Emit the epilogue: store the 64-bit top (%eax:%ebx) through the
   value pointer, return 0, and tear down the frame.  */
static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	    "mov 12(%ebp),%ecx\n\t"
	    "mov %eax,(%ecx)\n\t"
	    "mov %ebx,0x4(%ecx)\n\t"
	    "xor %eax,%eax\n\t"
	    "pop %ebx\n\t"
	    "pop %ebp\n\t"
	    "ret");
}
/* Emit 64-bit add (with carry across the %eax/%ebx halves) of the two
   top stack values.  */
static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	    "add (%esp),%eax\n\t"
	    "adc 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit 64-bit subtract (with borrow) of the top from the next slot;
   the result is popped into %eax:%ebx as the new top.  */
static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	    "subl %eax,(%esp)\n\t"
	    "sbbl %ebx,4(%esp)\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t");
}
/* Multiply is not implemented by this JIT; flag an error.  */
static void
i386_emit_mul (void)
{
  emit_error = 1;
}
/* Left shift is not implemented by this JIT; flag an error.  */
static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
/* Arithmetic right shift is not implemented by this JIT; flag an error.  */
static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
/* Logical right shift is not implemented by this JIT; flag an error.  */
static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
/* Emit sign-extension of the top value from ARG bits (8/16/32) to the
   full 64-bit %eax:%ebx pair; any other width is an error.  */
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
/* Emit logical NOT of the 64-bit top: 1 if both halves were zero,
   else 0.  */
static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	    "or %ebx,%eax\n\t"
	    "test %eax,%eax\n\t"
	    "sete %cl\n\t"
	    "xor %ebx,%ebx\n\t"
	    "movzbl %cl,%eax");
}
/* Emit 64-bit bitwise AND of the two top stack values.  */
static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	    "and (%esp),%eax\n\t"
	    "and 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit 64-bit bitwise OR of the two top stack values.  */
static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	    "or (%esp),%eax\n\t"
	    "or 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit 64-bit bitwise XOR of the two top stack values.  */
static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	    "xor (%esp),%eax\n\t"
	    "xor 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit 64-bit bitwise NOT of the top value (both halves).  */
static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	    "xor $0xffffffff,%eax\n\t"
	    "xor $0xffffffff,%ebx\n\t");
}
/* Emit 64-bit equality comparison (both halves); result (0/1) becomes
   the new top and one slot is popped.  */
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jne .Li386_equal_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "je .Li386_equal_true\n\t"
	    ".Li386_equal_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_equal_end\n\t"
	    ".Li386_equal_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_equal_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit signed 64-bit "next-slot < top": compare high halves first,
   fall through to low halves on equality; result (0/1) is the new top.  */
static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    "jne .Li386_less_signed_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    ".Li386_less_signed_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_signed_end\n\t"
	    ".Li386_less_signed_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_signed_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit unsigned 64-bit "next-slot < top": high halves first, then low
   halves on equality; result (0/1) is the new top.  */
static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    "jne .Li386_less_unsigned_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    ".Li386_less_unsigned_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_unsigned_end\n\t"
	    ".Li386_less_unsigned_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_unsigned_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit a memory dereference of the address in %eax, loading SIZE
   (1/2/4/8) bytes; an 8-byte load fills both halves %eax:%ebx.  */
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		"movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		"movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		"movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		"movl 4(%eax),%ebx\n\t"
		"movl (%eax),%eax");
      break;
    }
}
/* Emit a conditional branch: pop the 64-bit condition, jump if
   nonzero.  The jne is raw bytes with a zero displacement, patched
   later via i386_write_goto_address using *OFFSET_P/*SIZE_P.  */
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	    "mov %eax,%ecx\n\t"
	    "or %ebx,%ecx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "cmpl $0,%ecx\n\t"
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
/* Emit an unconditional jmp with a zero displacement to be patched
   later; report displacement offset/size through OFFSET_P/SIZE_P.  */
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
/* Patch a previously emitted branch at FROM (displacement of SIZE
   bytes) to target TO.  */
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  /* Relative displacement is computed from the end of the field.  */
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit a load of the 64-bit constant NUM into %eax:%ebx.  The xor of
   %ebx is used when the high half is zero (shorter encoding).  */
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = (num & 0xffffffff);
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      *((int *) (&buf[i])) = hi;
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2321 i386_emit_call (CORE_ADDR fn
)
2323 unsigned char buf
[16];
2325 CORE_ADDR buildaddr
;
2327 buildaddr
= current_insn_ptr
;
2329 buf
[i
++] = 0xe8; /* call <reladdr> */
2330 offset
= ((int) fn
) - (buildaddr
+ 5);
2331 memcpy (buf
+ 1, &offset
, 4);
2332 append_insns (&buildaddr
, 5, buf
);
2333 current_insn_ptr
= buildaddr
;
/* Emit a fetch of raw register REG: push the register number and the
   raw-regs base (8(%ebp)) as stack arguments and call the get_raw_reg
   helper; clear %ebx since the result is 32-bit.  */
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	    "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	    "mov %eax,4(%esp)\n\t"
	    "mov 8(%ebp),%eax\n\t"
	    "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* Emit: discard the cached top and reload both halves from the stack.  */
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	    "pop %eax\n\t"
	    "pop %ebx");
}
/* Emit: spill the cached 64-bit top (%eax:%ebx) onto the stack,
   high half first so the low word ends up at the lower address.  */
static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	    "push %ebx\n\t"
	    "push %eax");
}
/* Emit zero-extension of the top value from ARG bits (8/16/32); the
   high half %ebx is always cleared.  Any other width is an error.  */
static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		"and $0xff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		"and $0xffff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		"xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
/* Emit: exchange the two top 64-bit stack values.  */
static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	    "mov %eax,%ecx\n\t"
	    "mov %ebx,%edx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "push %edx\n\t"
	    "push %ecx");
}
/* Emit a stack adjustment discarding N 8-byte slots (lea n*8(%esp),%esp).
   Only handles small adjustments (single-byte displacement).  */
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit: pass ARG1 on the
   stack, call FN, and capture the 64-bit result (%eax:%edx) as the
   new top (%eax:%ebx).  */
static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	    /* Reserve a bit of stack space.  */
	    "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	    "mov %edx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit: pass ARG1 and
   the current 64-bit top as stack arguments, preserving the top
   across the call.  */
static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	    /* Preserve %eax only; we don't have to worry about %ebx.  */
	    "push %eax\n\t"
	    /* Reserve a bit of stack space for arguments.  */
	    "sub $0x10,%esp\n\t"
	    /* Copy "top" to the second argument position.  (Note that
	       we can't assume function won't scribble on its
	       arguments, so don't try to restore from this.)  */
	    "mov %eax,4(%esp)\n\t"
	    "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	    "lea 0x10(%esp),%esp\n\t"
	    /* Restore original stack top.  */
	    "pop %eax");
}
/* Table of 32-bit code-generation callbacks used by the agent
   expression JIT when the inferior is i386.  Field order must match
   struct emit_ops in the gdbserver headers.  */
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };
/* Select the emit-ops table matching the inferior's word size.  */
static struct emit_ops *
x86_emit_ops (void)
{
  /* A register size of 8 bytes means the inferior is 64-bit.  */
  int use_64bit = register_size (0) == 8;

#ifdef __x86_64__
  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
2543 /* This is initialized assuming an amd64 target.
2544 x86_arch_setup will correct it for i386 or amd64 targets. */
2546 struct linux_target_ops the_low_target
=
2562 x86_stopped_by_watchpoint
,
2563 x86_stopped_data_address
,
2564 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2565 native i386 case (no registers smaller than an xfer unit), and are not
2566 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2569 /* need to fix up i386 siginfo if host is amd64 */
2571 x86_linux_new_process
,
2572 x86_linux_new_thread
,
2573 x86_linux_prepare_to_resume
,
2574 x86_linux_process_qsupported
,
2575 x86_supports_tracepoints
,
2576 x86_get_thread_area
,
2577 x86_install_fast_tracepoint_jump_pad
,