1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
/* Register-cache initializers, defined in the auto-generated
   tdesc files named below.  */

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
/* A `jmp rel32' instruction; the 4 displacement bytes are patched in
   when the jump pad is wired up.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
61 #include <sys/procfs.h>
62 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests and arch_prctl codes that
   older kernel headers may lack.  The values are fixed kernel ABI.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
92 /* Per-process arch-specific data we want to keep. */
94 struct arch_process_info
96 struct i386_debug_reg_state debug_reg_state
;
99 /* Per-thread arch-specific data we want to keep. */
103 /* Non-zero if our copy differs from what's recorded in the thread. */
104 int debug_registers_changed
;
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
/* `struct user' offsets for the amd64 register set; -1 marks registers
   (FPU/SSE and beyond) that are not transferred via this map.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
142 #else /* ! __x86_64__ */
144 /* Mapping between the general-purpose registers in `struct user'
145 format and GDB's register array layout. */
146 static /*const*/ int i386_regmap
[] =
148 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
149 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
150 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
151 DS
* 4, ES
* 4, FS
* 4, GS
* 4
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
158 /* Called by libthread_db. */
161 ps_get_thread_area (const struct ps_prochandle
*ph
,
162 lwpid_t lwpid
, int idx
, void **base
)
165 int use_64bit
= register_size (0) == 8;
172 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
176 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
187 unsigned int desc
[4];
189 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
190 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
193 *(int *)base
= desc
[1];
198 /* Get the thread area address. This is used to recognize which
199 thread is which when tracing with the in-process agent library. We
200 don't read anything from the address, and treat it as opaque; it's
201 the address itself that we assume is unique per-thread. */
204 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
207 int use_64bit
= register_size (0) == 8;
212 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
214 *addr
= (CORE_ADDR
) (uintptr_t) base
;
223 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
224 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
225 unsigned int desc
[4];
227 const int reg_thread_area
= 3; /* bits to scale down register value. */
230 collect_register_by_name (regcache
, "gs", &gs
);
232 idx
= gs
>> reg_thread_area
;
234 if (ptrace (PTRACE_GET_THREAD_AREA
,
235 lwpid_of (lwp
), (void *) (long) idx
, (unsigned long) &desc
) < 0)
246 i386_cannot_store_register (int regno
)
248 return regno
>= I386_NUM_REGS
;
252 i386_cannot_fetch_register (int regno
)
254 return regno
>= I386_NUM_REGS
;
258 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
263 if (register_size (0) == 8)
265 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
266 if (x86_64_regmap
[i
] != -1)
267 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
272 for (i
= 0; i
< I386_NUM_REGS
; i
++)
273 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
275 collect_register_by_name (regcache
, "orig_eax",
276 ((char *) buf
) + ORIG_EAX
* 4);
280 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
285 if (register_size (0) == 8)
287 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
288 if (x86_64_regmap
[i
] != -1)
289 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
294 for (i
= 0; i
< I386_NUM_REGS
; i
++)
295 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
297 supply_register_by_name (regcache
, "orig_eax",
298 ((char *) buf
) + ORIG_EAX
* 4);
/* Fill BUF (fxsave layout on amd64, fsave layout on i386) from the
   FPU state in REGCACHE.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Supply the FPU state in BUF (fxsave layout on amd64, fsave layout
   on i386) to REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Fill BUF (fxsave layout) from REGCACHE.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Supply the fxsave-layout BUF to REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Fill BUF (xsave layout) from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Supply the xsave-layout BUF to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
349 /* ??? The non-biarch i386 case stores all the i387 regs twice.
350 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
351 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
352 doesn't work. IWBN to avoid the duplication in the case where it
353 does work. Maybe the arch_setup routine could check whether it works
354 and update target_regsets accordingly, maybe by moving target_regsets
355 to linux_target_ops and set the right one there, rather than having to
356 modify the target_regsets global. */
358 struct regset_info target_regsets
[] =
360 #ifdef HAVE_PTRACE_GETREGS
361 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
363 x86_fill_gregset
, x86_store_gregset
},
364 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
365 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
367 # ifdef HAVE_PTRACE_GETFPXREGS
368 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
370 x86_fill_fpxregset
, x86_store_fpxregset
},
373 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
375 x86_fill_fpregset
, x86_store_fpregset
},
376 #endif /* HAVE_PTRACE_GETREGS */
377 { 0, 0, 0, -1, -1, NULL
, NULL
}
381 x86_get_pc (struct regcache
*regcache
)
383 int use_64bit
= register_size (0) == 8;
388 collect_register_by_name (regcache
, "rip", &pc
);
389 return (CORE_ADDR
) pc
;
394 collect_register_by_name (regcache
, "eip", &pc
);
395 return (CORE_ADDR
) pc
;
400 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
402 int use_64bit
= register_size (0) == 8;
406 unsigned long newpc
= pc
;
407 supply_register_by_name (regcache
, "rip", &newpc
);
411 unsigned int newpc
= pc
;
412 supply_register_by_name (regcache
, "eip", &newpc
);
/* The x86 software breakpoint: a single `int3' opcode.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
420 x86_breakpoint_at (CORE_ADDR pc
)
424 (*the_target
->read_memory
) (pc
, &c
, 1);
431 /* Support for debug registers. */
434 x86_linux_dr_get (ptid_t ptid
, int regnum
)
439 tid
= ptid_get_lwp (ptid
);
442 value
= ptrace (PTRACE_PEEKUSER
, tid
,
443 offsetof (struct user
, u_debugreg
[regnum
]), 0);
445 error ("Couldn't read debug register");
451 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
455 tid
= ptid_get_lwp (ptid
);
458 ptrace (PTRACE_POKEUSER
, tid
,
459 offsetof (struct user
, u_debugreg
[regnum
]), value
);
461 error ("Couldn't write debug register");
465 update_debug_registers_callback (struct inferior_list_entry
*entry
,
468 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
469 int pid
= *(int *) pid_p
;
471 /* Only update the threads of this process. */
472 if (pid_of (lwp
) == pid
)
474 /* The actual update is done later just before resuming the lwp,
475 we just mark that the registers need updating. */
476 lwp
->arch_private
->debug_registers_changed
= 1;
478 /* If the lwp isn't stopped, force it to momentarily pause, so
479 we can update its debug registers. */
481 linux_stop_lwp (lwp
);
487 /* Update the inferior's debug register REGNUM from STATE. */
490 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
492 /* Only update the threads of this process. */
493 int pid
= pid_of (get_thread_lwp (current_inferior
));
495 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
496 fatal ("Invalid debug register %d", regnum
);
498 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
501 /* Return the inferior's debug register REGNUM. */
504 i386_dr_low_get_addr (int regnum
)
506 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
507 ptid_t ptid
= ptid_of (lwp
);
509 /* DR6 and DR7 are retrieved with some other way. */
510 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
< DR_LASTADDR
);
512 return x86_linux_dr_get (ptid
, regnum
);
515 /* Update the inferior's DR7 debug control register from STATE. */
518 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
520 /* Only update the threads of this process. */
521 int pid
= pid_of (get_thread_lwp (current_inferior
));
523 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
526 /* Return the inferior's DR7 debug control register. */
529 i386_dr_low_get_control (void)
531 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
532 ptid_t ptid
= ptid_of (lwp
);
534 return x86_linux_dr_get (ptid
, DR_CONTROL
);
537 /* Get the value of the DR6 debug status register from the inferior
538 and record it in STATE. */
541 i386_dr_low_get_status (void)
543 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
544 ptid_t ptid
= ptid_of (lwp
);
546 return x86_linux_dr_get (ptid
, DR_STATUS
);
549 /* Watchpoint support. */
552 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
554 struct process_info
*proc
= current_process ();
558 return set_gdb_breakpoint_at (addr
);
562 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
571 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
573 struct process_info
*proc
= current_process ();
577 return delete_gdb_breakpoint_at (addr
);
581 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
590 x86_stopped_by_watchpoint (void)
592 struct process_info
*proc
= current_process ();
593 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
597 x86_stopped_data_address (void)
599 struct process_info
*proc
= current_process ();
601 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
607 /* Called when a new process is created. */
609 static struct arch_process_info
*
610 x86_linux_new_process (void)
612 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
614 i386_low_init_dregs (&info
->debug_reg_state
);
619 /* Called when a new thread is detected. */
621 static struct arch_lwp_info
*
622 x86_linux_new_thread (void)
624 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
626 info
->debug_registers_changed
= 1;
631 /* Called when resuming a thread.
632 If the debug regs have changed, update the thread's copies. */
635 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
637 ptid_t ptid
= ptid_of (lwp
);
639 if (lwp
->arch_private
->debug_registers_changed
)
642 int pid
= ptid_get_pid (ptid
);
643 struct process_info
*proc
= find_process_pid (pid
);
644 struct i386_debug_reg_state
*state
= &proc
->private->arch_private
->debug_reg_state
;
646 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
647 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
649 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
651 lwp
->arch_private
->debug_registers_changed
= 0;
654 if (lwp
->stopped_by_watchpoint
)
655 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
658 /* When GDBSERVER is built as a 64-bit application on linux, the
659 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
660 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
661 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
662 conversion in-place ourselves. */
664 /* These types below (compat_*) define a siginfo type that is layout
665 compatible with the siginfo type exported by the 32-bit userspace
670 typedef int compat_int_t
;
671 typedef unsigned int compat_uptr_t
;
673 typedef int compat_time_t
;
674 typedef int compat_timer_t
;
675 typedef int compat_clock_t
;
677 struct compat_timeval
679 compat_time_t tv_sec
;
683 typedef union compat_sigval
685 compat_int_t sival_int
;
686 compat_uptr_t sival_ptr
;
689 typedef struct compat_siginfo
697 int _pad
[((128 / sizeof (int)) - 3)];
706 /* POSIX.1b timers */
711 compat_sigval_t _sigval
;
714 /* POSIX.1b signals */
719 compat_sigval_t _sigval
;
728 compat_clock_t _utime
;
729 compat_clock_t _stime
;
732 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
747 #define cpt_si_pid _sifields._kill._pid
748 #define cpt_si_uid _sifields._kill._uid
749 #define cpt_si_timerid _sifields._timer._tid
750 #define cpt_si_overrun _sifields._timer._overrun
751 #define cpt_si_status _sifields._sigchld._status
752 #define cpt_si_utime _sifields._sigchld._utime
753 #define cpt_si_stime _sifields._sigchld._stime
754 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
755 #define cpt_si_addr _sifields._sigfault._addr
756 #define cpt_si_band _sifields._sigpoll._band
757 #define cpt_si_fd _sifields._sigpoll._fd
759 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
760 In their place is si_timer1,si_timer2. */
762 #define si_timerid si_timer1
765 #define si_overrun si_timer2
769 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
771 memset (to
, 0, sizeof (*to
));
773 to
->si_signo
= from
->si_signo
;
774 to
->si_errno
= from
->si_errno
;
775 to
->si_code
= from
->si_code
;
779 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
781 else if (to
->si_code
== SI_USER
)
783 to
->cpt_si_pid
= from
->si_pid
;
784 to
->cpt_si_uid
= from
->si_uid
;
786 else if (to
->si_code
== SI_TIMER
)
788 to
->cpt_si_timerid
= from
->si_timerid
;
789 to
->cpt_si_overrun
= from
->si_overrun
;
790 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
794 switch (to
->si_signo
)
797 to
->cpt_si_pid
= from
->si_pid
;
798 to
->cpt_si_uid
= from
->si_uid
;
799 to
->cpt_si_status
= from
->si_status
;
800 to
->cpt_si_utime
= from
->si_utime
;
801 to
->cpt_si_stime
= from
->si_stime
;
807 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
810 to
->cpt_si_band
= from
->si_band
;
811 to
->cpt_si_fd
= from
->si_fd
;
814 to
->cpt_si_pid
= from
->si_pid
;
815 to
->cpt_si_uid
= from
->si_uid
;
816 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
823 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
825 memset (to
, 0, sizeof (*to
));
827 to
->si_signo
= from
->si_signo
;
828 to
->si_errno
= from
->si_errno
;
829 to
->si_code
= from
->si_code
;
833 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
835 else if (to
->si_code
== SI_USER
)
837 to
->si_pid
= from
->cpt_si_pid
;
838 to
->si_uid
= from
->cpt_si_uid
;
840 else if (to
->si_code
== SI_TIMER
)
842 to
->si_timerid
= from
->cpt_si_timerid
;
843 to
->si_overrun
= from
->cpt_si_overrun
;
844 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
848 switch (to
->si_signo
)
851 to
->si_pid
= from
->cpt_si_pid
;
852 to
->si_uid
= from
->cpt_si_uid
;
853 to
->si_status
= from
->cpt_si_status
;
854 to
->si_utime
= from
->cpt_si_utime
;
855 to
->si_stime
= from
->cpt_si_stime
;
861 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
864 to
->si_band
= from
->cpt_si_band
;
865 to
->si_fd
= from
->cpt_si_fd
;
868 to
->si_pid
= from
->cpt_si_pid
;
869 to
->si_uid
= from
->cpt_si_uid
;
870 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
876 #endif /* __x86_64__ */
878 /* Convert a native/host siginfo object, into/from the siginfo in the
879 layout of the inferiors' architecture. Returns true if any
880 conversion was done; false otherwise. If DIRECTION is 1, then copy
881 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
885 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
888 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
889 if (register_size (0) == 4)
891 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
892 fatal ("unexpected difference in siginfo");
895 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
897 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
908 /* Update gdbserver_xmltarget. */
911 x86_linux_update_xmltarget (void)
914 struct regset_info
*regset
;
915 static unsigned long long xcr0
;
916 static int have_ptrace_getregset
= -1;
917 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
918 static int have_ptrace_getfpxregs
= -1;
921 if (!current_inferior
)
924 /* Before changing the register cache internal layout or the target
925 regsets, flush the contents of the current valid caches back to
927 regcache_invalidate ();
929 pid
= pid_of (get_thread_lwp (current_inferior
));
931 if (num_xmm_registers
== 8)
932 init_registers_i386_linux ();
934 init_registers_amd64_linux ();
937 # ifdef HAVE_PTRACE_GETFPXREGS
938 if (have_ptrace_getfpxregs
== -1)
940 elf_fpxregset_t fpxregs
;
942 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
944 have_ptrace_getfpxregs
= 0;
945 x86_xcr0
= I386_XSTATE_X87_MASK
;
947 /* Disable PTRACE_GETFPXREGS. */
948 for (regset
= target_regsets
;
949 regset
->fill_function
!= NULL
; regset
++)
950 if (regset
->get_request
== PTRACE_GETFPXREGS
)
957 have_ptrace_getfpxregs
= 1;
960 if (!have_ptrace_getfpxregs
)
962 init_registers_i386_mmx_linux ();
966 init_registers_i386_linux ();
974 if (num_xmm_registers
== 8)
975 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
977 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
979 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
982 x86_xcr0
= I386_XSTATE_SSE_MASK
;
987 /* Check if XSAVE extended state is supported. */
988 if (have_ptrace_getregset
== -1)
990 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
993 iov
.iov_base
= xstateregs
;
994 iov
.iov_len
= sizeof (xstateregs
);
996 /* Check if PTRACE_GETREGSET works. */
997 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1000 have_ptrace_getregset
= 0;
1004 have_ptrace_getregset
= 1;
1006 /* Get XCR0 from XSAVE extended state at byte 464. */
1007 xcr0
= xstateregs
[464 / sizeof (long long)];
1009 /* Use PTRACE_GETREGSET if it is available. */
1010 for (regset
= target_regsets
;
1011 regset
->fill_function
!= NULL
; regset
++)
1012 if (regset
->get_request
== PTRACE_GETREGSET
)
1013 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1014 else if (regset
->type
!= GENERAL_REGS
)
1018 if (have_ptrace_getregset
)
1020 /* AVX is the highest feature we support. */
1021 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1026 /* I386 has 8 xmm regs. */
1027 if (num_xmm_registers
== 8)
1028 init_registers_i386_avx_linux ();
1030 init_registers_amd64_avx_linux ();
1032 init_registers_i386_avx_linux ();
1038 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1039 PTRACE_GETREGSET. */
1042 x86_linux_process_qsupported (const char *query
)
1044 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1045 with "i386" in qSupported query, it supports x86 XML target
1048 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1050 char *copy
= xstrdup (query
+ 13);
1053 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1055 if (strcmp (p
, "i386") == 0)
1065 x86_linux_update_xmltarget ();
1068 /* Initialize gdbserver for the architecture of the inferior. */
1071 x86_arch_setup (void)
1074 int pid
= pid_of (get_thread_lwp (current_inferior
));
1075 char *file
= linux_child_pid_to_exec_file (pid
);
1076 int use_64bit
= elf_64_file_p (file
);
1082 /* This can only happen if /proc/<pid>/exe is unreadable,
1083 but "that can't happen" if we've gotten this far.
1084 Fall through and assume this is a 32-bit program. */
1088 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1089 the_low_target
.num_regs
= -1;
1090 the_low_target
.regmap
= NULL
;
1091 the_low_target
.cannot_fetch_register
= NULL
;
1092 the_low_target
.cannot_store_register
= NULL
;
1094 /* Amd64 has 16 xmm regs. */
1095 num_xmm_registers
= 16;
1097 x86_linux_update_xmltarget ();
1102 /* Ok we have a 32-bit inferior. */
1104 the_low_target
.num_regs
= I386_NUM_REGS
;
1105 the_low_target
.regmap
= i386_regmap
;
1106 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1107 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1109 /* I386 has 8 xmm regs. */
1110 num_xmm_registers
= 8;
1112 x86_linux_update_xmltarget ();
/* Fast tracepoints are supported on x86.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1122 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1124 write_inferior_memory (*to
, buf
, len
);
/* Append the machine-code bytes encoded in OP -- a string of
   whitespace-separated hex byte values such as "48 83 ec 18" -- to
   BUF.  Returns the number of bytes written.  The caller must ensure
   BUF has room for every byte in OP.

   OP is now const-correct: the routine only reads through it (strtoul
   does not modify its input), and all callers pass string literals.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No more parsable bytes: done.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1150 /* Build a jump pad that saves registers and calls a collection
1151 function. Writes a jump instruction to the jump pad to
1152 JJUMPAD_INSN. The caller is responsible to write it in at the
1153 tracepoint address. */
1156 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1157 CORE_ADDR collector
,
1160 CORE_ADDR
*jump_entry
,
1161 unsigned char *jjump_pad_insn
,
1162 ULONGEST
*jjump_pad_insn_size
,
1163 CORE_ADDR
*adjusted_insn_addr
,
1164 CORE_ADDR
*adjusted_insn_addr_end
)
1166 unsigned char buf
[40];
1168 CORE_ADDR buildaddr
= *jump_entry
;
1170 /* Build the jump pad. */
1172 /* First, do tracepoint data collection. Save registers. */
1174 /* Need to ensure stack pointer saved first. */
1175 buf
[i
++] = 0x54; /* push %rsp */
1176 buf
[i
++] = 0x55; /* push %rbp */
1177 buf
[i
++] = 0x57; /* push %rdi */
1178 buf
[i
++] = 0x56; /* push %rsi */
1179 buf
[i
++] = 0x52; /* push %rdx */
1180 buf
[i
++] = 0x51; /* push %rcx */
1181 buf
[i
++] = 0x53; /* push %rbx */
1182 buf
[i
++] = 0x50; /* push %rax */
1183 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1184 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1185 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1186 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1187 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1188 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1189 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1190 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1191 buf
[i
++] = 0x9c; /* pushfq */
1192 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1194 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1195 i
+= sizeof (unsigned long);
1196 buf
[i
++] = 0x57; /* push %rdi */
1197 append_insns (&buildaddr
, i
, buf
);
1199 /* Stack space for the collecting_t object. */
1201 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1202 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1203 memcpy (buf
+ i
, &tpoint
, 8);
1205 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1206 i
+= push_opcode (&buf
[i
],
1207 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1208 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1209 append_insns (&buildaddr
, i
, buf
);
1213 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1214 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1216 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1217 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1218 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1219 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1220 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1221 append_insns (&buildaddr
, i
, buf
);
1223 /* Set up the gdb_collect call. */
1224 /* At this point, (stack pointer + 0x18) is the base of our saved
1228 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1229 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1231 /* tpoint address may be 64-bit wide. */
1232 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1233 memcpy (buf
+ i
, &tpoint
, 8);
1235 append_insns (&buildaddr
, i
, buf
);
1237 /* The collector function being in the shared library, may be
1238 >31-bits away off the jump pad. */
1240 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1241 memcpy (buf
+ i
, &collector
, 8);
1243 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1244 append_insns (&buildaddr
, i
, buf
);
1246 /* Clear the spin-lock. */
1248 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1249 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1250 memcpy (buf
+ i
, &lockaddr
, 8);
1252 append_insns (&buildaddr
, i
, buf
);
1254 /* Remove stack that had been used for the collect_t object. */
1256 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1257 append_insns (&buildaddr
, i
, buf
);
1259 /* Restore register state. */
1261 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1265 buf
[i
++] = 0x9d; /* popfq */
1266 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1267 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1268 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1269 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1270 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1271 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1272 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1273 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1274 buf
[i
++] = 0x58; /* pop %rax */
1275 buf
[i
++] = 0x5b; /* pop %rbx */
1276 buf
[i
++] = 0x59; /* pop %rcx */
1277 buf
[i
++] = 0x5a; /* pop %rdx */
1278 buf
[i
++] = 0x5e; /* pop %rsi */
1279 buf
[i
++] = 0x5f; /* pop %rdi */
1280 buf
[i
++] = 0x5d; /* pop %rbp */
1281 buf
[i
++] = 0x5c; /* pop %rsp */
1282 append_insns (&buildaddr
, i
, buf
);
1284 /* Now, adjust the original instruction to execute in the jump
1286 *adjusted_insn_addr
= buildaddr
;
1287 relocate_instruction (&buildaddr
, tpaddr
);
1288 *adjusted_insn_addr_end
= buildaddr
;
1290 /* Finally, write a jump back to the program. */
1291 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1292 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1293 memcpy (buf
+ 1, &offset
, 4);
1294 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1296 /* The jump pad is now built. Wire in a jump to our jump pad. This
1297 is always done last (by our caller actually), so that we can
1298 install fast tracepoints with threads running. This relies on
1299 the agent's atomic write support. */
1300 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1301 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1302 memcpy (buf
+ 1, &offset
, 4);
1303 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1304 *jjump_pad_insn_size
= sizeof (jump_insn
);
1306 /* Return the end address of our pad. */
1307 *jump_entry
= buildaddr
;
1312 #endif /* __x86_64__ */
1314 /* Build a jump pad that saves registers and calls a collection
1315 function. Writes a jump instruction to the jump pad to
1316 JJUMPAD_INSN. The caller is responsible to write it in at the
1317 tracepoint address. */
1320 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1321 CORE_ADDR collector
,
1324 CORE_ADDR
*jump_entry
,
1325 unsigned char *jjump_pad_insn
,
1326 ULONGEST
*jjump_pad_insn_size
,
1327 CORE_ADDR
*adjusted_insn_addr
,
1328 CORE_ADDR
*adjusted_insn_addr_end
)
1330 unsigned char buf
[0x100];
1332 CORE_ADDR buildaddr
= *jump_entry
;
1334 /* Build the jump pad. */
1336 /* First, do tracepoint data collection. Save registers. */
1338 buf
[i
++] = 0x60; /* pushad */
1339 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1340 *((int *)(buf
+ i
)) = (int) tpaddr
;
1342 buf
[i
++] = 0x9c; /* pushf */
1343 buf
[i
++] = 0x1e; /* push %ds */
1344 buf
[i
++] = 0x06; /* push %es */
1345 buf
[i
++] = 0x0f; /* push %fs */
1347 buf
[i
++] = 0x0f; /* push %gs */
1349 buf
[i
++] = 0x16; /* push %ss */
1350 buf
[i
++] = 0x0e; /* push %cs */
1351 append_insns (&buildaddr
, i
, buf
);
1353 /* Stack space for the collecting_t object. */
1355 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1357 /* Build the object. */
1358 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1359 memcpy (buf
+ i
, &tpoint
, 4);
1361 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1363 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1364 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1365 append_insns (&buildaddr
, i
, buf
);
1367 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1368 If we cared for it, this could be using xchg alternatively. */
1371 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1372 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1374 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1376 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1377 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1378 append_insns (&buildaddr
, i
, buf
);
1381 /* Set up arguments to the gdb_collect call. */
1383 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1384 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1385 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1386 append_insns (&buildaddr
, i
, buf
);
1389 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1390 append_insns (&buildaddr
, i
, buf
);
1393 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1394 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1396 append_insns (&buildaddr
, i
, buf
);
1398 buf
[0] = 0xe8; /* call <reladdr> */
1399 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1400 memcpy (buf
+ 1, &offset
, 4);
1401 append_insns (&buildaddr
, 5, buf
);
1402 /* Clean up after the call. */
1403 buf
[0] = 0x83; /* add $0x8,%esp */
1406 append_insns (&buildaddr
, 3, buf
);
1409 /* Clear the spin-lock. This would need the LOCK prefix on older
1412 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1413 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1414 memcpy (buf
+ i
, &lockaddr
, 4);
1416 append_insns (&buildaddr
, i
, buf
);
1419 /* Remove stack that had been used for the collect_t object. */
1421 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1422 append_insns (&buildaddr
, i
, buf
);
1425 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1428 buf
[i
++] = 0x17; /* pop %ss */
1429 buf
[i
++] = 0x0f; /* pop %gs */
1431 buf
[i
++] = 0x0f; /* pop %fs */
1433 buf
[i
++] = 0x07; /* pop %es */
1434 buf
[i
++] = 0x1f; /* pop %ds */
1435 buf
[i
++] = 0x9d; /* popf */
1436 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1439 buf
[i
++] = 0x61; /* popad */
1440 append_insns (&buildaddr
, i
, buf
);
1442 /* Now, adjust the original instruction to execute in the jump
1444 *adjusted_insn_addr
= buildaddr
;
1445 relocate_instruction (&buildaddr
, tpaddr
);
1446 *adjusted_insn_addr_end
= buildaddr
;
1448 /* Write the jump back to the program. */
1449 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1450 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1451 memcpy (buf
+ 1, &offset
, 4);
1452 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1454 /* The jump pad is now built. Wire in a jump to our jump pad. This
1455 is always done last (by our caller actually), so that we can
1456 install fast tracepoints with threads running. This relies on
1457 the agent's atomic write support. */
1458 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1459 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1460 memcpy (buf
+ 1, &offset
, 4);
1461 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1462 *jjump_pad_insn_size
= sizeof (jump_insn
);
1464 /* Return the end address of our pad. */
1465 *jump_entry
= buildaddr
;
1471 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1472 CORE_ADDR collector
,
1475 CORE_ADDR
*jump_entry
,
1476 unsigned char *jjump_pad_insn
,
1477 ULONGEST
*jjump_pad_insn_size
,
1478 CORE_ADDR
*adjusted_insn_addr
,
1479 CORE_ADDR
*adjusted_insn_addr_end
)
1482 if (register_size (0) == 8)
1483 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1484 collector
, lockaddr
,
1485 orig_size
, jump_entry
,
1487 jjump_pad_insn_size
,
1489 adjusted_insn_addr_end
);
1492 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1493 collector
, lockaddr
,
1494 orig_size
, jump_entry
,
1496 jjump_pad_insn_size
,
1498 adjusted_insn_addr_end
);
1502 add_insns (unsigned char *start
, int len
)
1504 CORE_ADDR buildaddr
= current_insn_ptr
;
1507 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1508 len
, paddress (buildaddr
));
1510 append_insns (&buildaddr
, len
, start
);
1511 current_insn_ptr
= buildaddr
;
1514 /* Our general strategy for emitting code is to avoid specifying raw
1515 bytes whenever possible, and instead copy a block of inline asm
1516 that is embedded in the function. This is a little messy, because
1517 we need to keep the compiler from discarding what looks like dead
1518 code, plus suppress various warnings. */
1520 #define EMIT_ASM(NAME, INSNS) \
1523 extern unsigned char start_ ## NAME, end_ ## NAME; \
1524 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1525 __asm__ ("jmp end_" #NAME "\n" \
1526 "\t" "start_" #NAME ":" \
1528 "\t" "end_" #NAME ":"); \
1533 #define EMIT_ASM32(NAME,INSNS) \
1536 extern unsigned char start_ ## NAME, end_ ## NAME; \
1537 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1538 __asm__ (".code32\n" \
1539 "\t" "jmp end_" #NAME "\n" \
1540 "\t" "start_" #NAME ":\n" \
1542 "\t" "end_" #NAME ":\n" \
1548 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1555 amd64_emit_prologue (void)
1557 EMIT_ASM (amd64_prologue
,
1559 "movq %rsp,%rbp\n\t"
1560 "sub $0x20,%rsp\n\t"
1561 "movq %rdi,-8(%rbp)\n\t"
1562 "movq %rsi,-16(%rbp)");
1567 amd64_emit_epilogue (void)
1569 EMIT_ASM (amd64_epilogue
,
1570 "movq -16(%rbp),%rdi\n\t"
1571 "movq %rax,(%rdi)\n\t"
1578 amd64_emit_add (void)
1580 EMIT_ASM (amd64_add
,
1581 "add (%rsp),%rax\n\t"
1582 "lea 0x8(%rsp),%rsp");
1586 amd64_emit_sub (void)
1588 EMIT_ASM (amd64_sub
,
1589 "sub %rax,(%rsp)\n\t"
1594 amd64_emit_mul (void)
1600 amd64_emit_lsh (void)
1606 amd64_emit_rsh_signed (void)
1612 amd64_emit_rsh_unsigned (void)
1618 amd64_emit_ext (int arg
)
1623 EMIT_ASM (amd64_ext_8
,
1629 EMIT_ASM (amd64_ext_16
,
1634 EMIT_ASM (amd64_ext_32
,
1643 amd64_emit_log_not (void)
1645 EMIT_ASM (amd64_log_not
,
1646 "test %rax,%rax\n\t"
1652 amd64_emit_bit_and (void)
1654 EMIT_ASM (amd64_and
,
1655 "and (%rsp),%rax\n\t"
1656 "lea 0x8(%rsp),%rsp");
1660 amd64_emit_bit_or (void)
1663 "or (%rsp),%rax\n\t"
1664 "lea 0x8(%rsp),%rsp");
1668 amd64_emit_bit_xor (void)
1670 EMIT_ASM (amd64_xor
,
1671 "xor (%rsp),%rax\n\t"
1672 "lea 0x8(%rsp),%rsp");
1676 amd64_emit_bit_not (void)
1678 EMIT_ASM (amd64_bit_not
,
1679 "xorq $0xffffffffffffffff,%rax");
1683 amd64_emit_equal (void)
1685 EMIT_ASM (amd64_equal
,
1686 "cmp %rax,(%rsp)\n\t"
1687 "je .Lamd64_equal_true\n\t"
1689 "jmp .Lamd64_equal_end\n\t"
1690 ".Lamd64_equal_true:\n\t"
1692 ".Lamd64_equal_end:\n\t"
1693 "lea 0x8(%rsp),%rsp");
1697 amd64_emit_less_signed (void)
1699 EMIT_ASM (amd64_less_signed
,
1700 "cmp %rax,(%rsp)\n\t"
1701 "jl .Lamd64_less_signed_true\n\t"
1703 "jmp .Lamd64_less_signed_end\n\t"
1704 ".Lamd64_less_signed_true:\n\t"
1706 ".Lamd64_less_signed_end:\n\t"
1707 "lea 0x8(%rsp),%rsp");
1711 amd64_emit_less_unsigned (void)
1713 EMIT_ASM (amd64_less_unsigned
,
1714 "cmp %rax,(%rsp)\n\t"
1715 "jb .Lamd64_less_unsigned_true\n\t"
1717 "jmp .Lamd64_less_unsigned_end\n\t"
1718 ".Lamd64_less_unsigned_true:\n\t"
1720 ".Lamd64_less_unsigned_end:\n\t"
1721 "lea 0x8(%rsp),%rsp");
1725 amd64_emit_ref (int size
)
1730 EMIT_ASM (amd64_ref1
,
1734 EMIT_ASM (amd64_ref2
,
1738 EMIT_ASM (amd64_ref4
,
1739 "movl (%rax),%eax");
1742 EMIT_ASM (amd64_ref8
,
1743 "movq (%rax),%rax");
1749 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1751 EMIT_ASM (amd64_if_goto
,
1755 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1763 amd64_emit_goto (int *offset_p
, int *size_p
)
1765 EMIT_ASM (amd64_goto
,
1766 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1774 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1776 int diff
= (to
- (from
+ size
));
1777 unsigned char buf
[sizeof (int)];
1785 memcpy (buf
, &diff
, sizeof (int));
1786 write_inferior_memory (from
, buf
, sizeof (int));
1790 amd64_emit_const (LONGEST num
)
1792 unsigned char buf
[16];
1794 CORE_ADDR buildaddr
= current_insn_ptr
;
1797 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1798 *((LONGEST
*) (&buf
[i
])) = num
;
1800 append_insns (&buildaddr
, i
, buf
);
1801 current_insn_ptr
= buildaddr
;
1805 amd64_emit_call (CORE_ADDR fn
)
1807 unsigned char buf
[16];
1809 CORE_ADDR buildaddr
;
1812 /* The destination function being in the shared library, may be
1813 >31-bits away off the compiled code pad. */
1815 buildaddr
= current_insn_ptr
;
1817 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1821 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1823 /* Offset is too large for a call. Use callq, but that requires
1824 a register, so avoid it if possible. Use r10, since it is
1825 call-clobbered, we don't have to push/pop it. */
1826 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1828 memcpy (buf
+ i
, &fn
, 8);
1830 buf
[i
++] = 0xff; /* callq *%r10 */
1835 int offset32
= offset64
; /* we know we can't overflow here. */
1836 memcpy (buf
+ i
, &offset32
, 4);
1840 append_insns (&buildaddr
, i
, buf
);
1841 current_insn_ptr
= buildaddr
;
1845 amd64_emit_reg (int reg
)
1847 unsigned char buf
[16];
1849 CORE_ADDR buildaddr
;
1851 /* Assume raw_regs is still in %rdi. */
1852 buildaddr
= current_insn_ptr
;
1854 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1855 *((int *) (&buf
[i
])) = reg
;
1857 append_insns (&buildaddr
, i
, buf
);
1858 current_insn_ptr
= buildaddr
;
1859 amd64_emit_call (get_raw_reg_func_addr ());
1863 amd64_emit_pop (void)
1865 EMIT_ASM (amd64_pop
,
1870 amd64_emit_stack_flush (void)
1872 EMIT_ASM (amd64_stack_flush
,
1877 amd64_emit_zero_ext (int arg
)
1882 EMIT_ASM (amd64_zero_ext_8
,
1886 EMIT_ASM (amd64_zero_ext_16
,
1887 "and $0xffff,%rax");
1890 EMIT_ASM (amd64_zero_ext_32
,
1891 "mov $0xffffffff,%rcx\n\t"
1900 amd64_emit_swap (void)
1902 EMIT_ASM (amd64_swap
,
1909 amd64_emit_stack_adjust (int n
)
1911 unsigned char buf
[16];
1913 CORE_ADDR buildaddr
= current_insn_ptr
;
1916 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1920 /* This only handles adjustments up to 16, but we don't expect any more. */
1922 append_insns (&buildaddr
, i
, buf
);
1923 current_insn_ptr
= buildaddr
;
1926 /* FN's prototype is `LONGEST(*fn)(int)'. */
1929 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1931 unsigned char buf
[16];
1933 CORE_ADDR buildaddr
;
1935 buildaddr
= current_insn_ptr
;
1937 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1938 *((int *) (&buf
[i
])) = arg1
;
1940 append_insns (&buildaddr
, i
, buf
);
1941 current_insn_ptr
= buildaddr
;
1942 amd64_emit_call (fn
);
1945 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1948 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
1950 unsigned char buf
[16];
1952 CORE_ADDR buildaddr
;
1954 buildaddr
= current_insn_ptr
;
1956 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1957 *((int *) (&buf
[i
])) = arg1
;
1959 append_insns (&buildaddr
, i
, buf
);
1960 current_insn_ptr
= buildaddr
;
1961 EMIT_ASM (amd64_void_call_2_a
,
1962 /* Save away a copy of the stack top. */
1964 /* Also pass top as the second argument. */
1966 amd64_emit_call (fn
);
1967 EMIT_ASM (amd64_void_call_2_b
,
1968 /* Restore the stack top, %rax may have been trashed. */
1972 struct emit_ops amd64_emit_ops
=
1974 amd64_emit_prologue
,
1975 amd64_emit_epilogue
,
1980 amd64_emit_rsh_signed
,
1981 amd64_emit_rsh_unsigned
,
1989 amd64_emit_less_signed
,
1990 amd64_emit_less_unsigned
,
1994 amd64_write_goto_address
,
1999 amd64_emit_stack_flush
,
2000 amd64_emit_zero_ext
,
2002 amd64_emit_stack_adjust
,
2003 amd64_emit_int_call_1
,
2004 amd64_emit_void_call_2
2007 #endif /* __x86_64__ */
2010 i386_emit_prologue (void)
2012 EMIT_ASM32 (i386_prologue
,
2015 /* At this point, the raw regs base address is at 8(%ebp), and the
2016 value pointer is at 12(%ebp). */
2020 i386_emit_epilogue (void)
2022 EMIT_ASM32 (i386_epilogue
,
2023 "mov 12(%ebp),%ecx\n\t"
2024 "mov %eax,(%ecx)\n\t"
2025 "mov %ebx,0x4(%ecx)\n\t"
2032 i386_emit_add (void)
2034 EMIT_ASM32 (i386_add
,
2035 "add (%esp),%eax\n\t"
2036 "adc 0x4(%esp),%ebx\n\t"
2037 "lea 0x8(%esp),%esp");
2041 i386_emit_sub (void)
2043 EMIT_ASM32 (i386_sub
,
2044 "subl %eax,(%esp)\n\t"
2045 "sbbl %ebx,4(%esp)\n\t"
2051 i386_emit_mul (void)
2057 i386_emit_lsh (void)
2063 i386_emit_rsh_signed (void)
2069 i386_emit_rsh_unsigned (void)
2075 i386_emit_ext (int arg
)
2080 EMIT_ASM32 (i386_ext_8
,
2083 "movl %eax,%ebx\n\t"
2087 EMIT_ASM32 (i386_ext_16
,
2089 "movl %eax,%ebx\n\t"
2093 EMIT_ASM32 (i386_ext_32
,
2094 "movl %eax,%ebx\n\t"
2103 i386_emit_log_not (void)
2105 EMIT_ASM32 (i386_log_not
,
2107 "test %eax,%eax\n\t"
2114 i386_emit_bit_and (void)
2116 EMIT_ASM32 (i386_and
,
2117 "and (%esp),%eax\n\t"
2118 "and 0x4(%esp),%ebx\n\t"
2119 "lea 0x8(%esp),%esp");
2123 i386_emit_bit_or (void)
2125 EMIT_ASM32 (i386_or
,
2126 "or (%esp),%eax\n\t"
2127 "or 0x4(%esp),%ebx\n\t"
2128 "lea 0x8(%esp),%esp");
2132 i386_emit_bit_xor (void)
2134 EMIT_ASM32 (i386_xor
,
2135 "xor (%esp),%eax\n\t"
2136 "xor 0x4(%esp),%ebx\n\t"
2137 "lea 0x8(%esp),%esp");
2141 i386_emit_bit_not (void)
2143 EMIT_ASM32 (i386_bit_not
,
2144 "xor $0xffffffff,%eax\n\t"
2145 "xor $0xffffffff,%ebx\n\t");
2149 i386_emit_equal (void)
2151 EMIT_ASM32 (i386_equal
,
2152 "cmpl %ebx,4(%esp)\n\t"
2153 "jne .Li386_equal_false\n\t"
2154 "cmpl %eax,(%esp)\n\t"
2155 "je .Li386_equal_true\n\t"
2156 ".Li386_equal_false:\n\t"
2158 "jmp .Li386_equal_end\n\t"
2159 ".Li386_equal_true:\n\t"
2161 ".Li386_equal_end:\n\t"
2163 "lea 0x8(%esp),%esp");
2167 i386_emit_less_signed (void)
2169 EMIT_ASM32 (i386_less_signed
,
2170 "cmpl %ebx,4(%esp)\n\t"
2171 "jl .Li386_less_signed_true\n\t"
2172 "jne .Li386_less_signed_false\n\t"
2173 "cmpl %eax,(%esp)\n\t"
2174 "jl .Li386_less_signed_true\n\t"
2175 ".Li386_less_signed_false:\n\t"
2177 "jmp .Li386_less_signed_end\n\t"
2178 ".Li386_less_signed_true:\n\t"
2180 ".Li386_less_signed_end:\n\t"
2182 "lea 0x8(%esp),%esp");
2186 i386_emit_less_unsigned (void)
2188 EMIT_ASM32 (i386_less_unsigned
,
2189 "cmpl %ebx,4(%esp)\n\t"
2190 "jb .Li386_less_unsigned_true\n\t"
2191 "jne .Li386_less_unsigned_false\n\t"
2192 "cmpl %eax,(%esp)\n\t"
2193 "jb .Li386_less_unsigned_true\n\t"
2194 ".Li386_less_unsigned_false:\n\t"
2196 "jmp .Li386_less_unsigned_end\n\t"
2197 ".Li386_less_unsigned_true:\n\t"
2199 ".Li386_less_unsigned_end:\n\t"
2201 "lea 0x8(%esp),%esp");
2205 i386_emit_ref (int size
)
2210 EMIT_ASM32 (i386_ref1
,
2214 EMIT_ASM32 (i386_ref2
,
2218 EMIT_ASM32 (i386_ref4
,
2219 "movl (%eax),%eax");
2222 EMIT_ASM32 (i386_ref8
,
2223 "movl 4(%eax),%ebx\n\t"
2224 "movl (%eax),%eax");
2230 i386_emit_if_goto (int *offset_p
, int *size_p
)
2232 EMIT_ASM32 (i386_if_goto
,
2238 /* Don't trust the assembler to choose the right jump */
2239 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2242 *offset_p
= 11; /* be sure that this matches the sequence above */
2248 i386_emit_goto (int *offset_p
, int *size_p
)
2250 EMIT_ASM32 (i386_goto
,
2251 /* Don't trust the assembler to choose the right jump */
2252 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2260 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2262 int diff
= (to
- (from
+ size
));
2263 unsigned char buf
[sizeof (int)];
2265 /* We're only doing 4-byte sizes at the moment. */
2272 memcpy (buf
, &diff
, sizeof (int));
2273 write_inferior_memory (from
, buf
, sizeof (int));
2277 i386_emit_const (LONGEST num
)
2279 unsigned char buf
[16];
2281 CORE_ADDR buildaddr
= current_insn_ptr
;
2284 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2285 *((int *) (&buf
[i
])) = (num
& 0xffffffff);
2287 hi
= ((num
>> 32) & 0xffffffff);
2290 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2291 *((int *) (&buf
[i
])) = hi
;
2296 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2298 append_insns (&buildaddr
, i
, buf
);
2299 current_insn_ptr
= buildaddr
;
2303 i386_emit_call (CORE_ADDR fn
)
2305 unsigned char buf
[16];
2307 CORE_ADDR buildaddr
;
2309 buildaddr
= current_insn_ptr
;
2311 buf
[i
++] = 0xe8; /* call <reladdr> */
2312 offset
= ((int) fn
) - (buildaddr
+ 5);
2313 memcpy (buf
+ 1, &offset
, 4);
2314 append_insns (&buildaddr
, 5, buf
);
2315 current_insn_ptr
= buildaddr
;
2319 i386_emit_reg (int reg
)
2321 unsigned char buf
[16];
2323 CORE_ADDR buildaddr
;
2325 EMIT_ASM32 (i386_reg_a
,
2327 buildaddr
= current_insn_ptr
;
2329 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2330 *((int *) (&buf
[i
])) = reg
;
2332 append_insns (&buildaddr
, i
, buf
);
2333 current_insn_ptr
= buildaddr
;
2334 EMIT_ASM32 (i386_reg_b
,
2335 "mov %eax,4(%esp)\n\t"
2336 "mov 8(%ebp),%eax\n\t"
2338 i386_emit_call (get_raw_reg_func_addr ());
2339 EMIT_ASM32 (i386_reg_c
,
2341 "lea 0x8(%esp),%esp");
2345 i386_emit_pop (void)
2347 EMIT_ASM32 (i386_pop
,
2353 i386_emit_stack_flush (void)
2355 EMIT_ASM32 (i386_stack_flush
,
2361 i386_emit_zero_ext (int arg
)
2366 EMIT_ASM32 (i386_zero_ext_8
,
2367 "and $0xff,%eax\n\t"
2371 EMIT_ASM32 (i386_zero_ext_16
,
2372 "and $0xffff,%eax\n\t"
2376 EMIT_ASM32 (i386_zero_ext_32
,
2385 i386_emit_swap (void)
2387 EMIT_ASM32 (i386_swap
,
2397 i386_emit_stack_adjust (int n
)
2399 unsigned char buf
[16];
2401 CORE_ADDR buildaddr
= current_insn_ptr
;
2404 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2408 append_insns (&buildaddr
, i
, buf
);
2409 current_insn_ptr
= buildaddr
;
2412 /* FN's prototype is `LONGEST(*fn)(int)'. */
2415 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2417 unsigned char buf
[16];
2419 CORE_ADDR buildaddr
;
2421 EMIT_ASM32 (i386_int_call_1_a
,
2422 /* Reserve a bit of stack space. */
2424 /* Put the one argument on the stack. */
2425 buildaddr
= current_insn_ptr
;
2427 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2430 *((int *) (&buf
[i
])) = arg1
;
2432 append_insns (&buildaddr
, i
, buf
);
2433 current_insn_ptr
= buildaddr
;
2434 i386_emit_call (fn
);
2435 EMIT_ASM32 (i386_int_call_1_c
,
2437 "lea 0x8(%esp),%esp");
2440 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2443 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2445 unsigned char buf
[16];
2447 CORE_ADDR buildaddr
;
2449 EMIT_ASM32 (i386_void_call_2_a
,
2450 /* Preserve %eax only; we don't have to worry about %ebx. */
2452 /* Reserve a bit of stack space for arguments. */
2453 "sub $0x10,%esp\n\t"
2454 /* Copy "top" to the second argument position. (Note that
2455 we can't assume function won't scribble on its
2456 arguments, so don't try to restore from this.) */
2457 "mov %eax,4(%esp)\n\t"
2458 "mov %ebx,8(%esp)");
2459 /* Put the first argument on the stack. */
2460 buildaddr
= current_insn_ptr
;
2462 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2465 *((int *) (&buf
[i
])) = arg1
;
2467 append_insns (&buildaddr
, i
, buf
);
2468 current_insn_ptr
= buildaddr
;
2469 i386_emit_call (fn
);
2470 EMIT_ASM32 (i386_void_call_2_b
,
2471 "lea 0x10(%esp),%esp\n\t"
2472 /* Restore original stack top. */
2476 struct emit_ops i386_emit_ops
=
2484 i386_emit_rsh_signed
,
2485 i386_emit_rsh_unsigned
,
2493 i386_emit_less_signed
,
2494 i386_emit_less_unsigned
,
2498 i386_write_goto_address
,
2503 i386_emit_stack_flush
,
2506 i386_emit_stack_adjust
,
2507 i386_emit_int_call_1
,
2508 i386_emit_void_call_2
2512 static struct emit_ops
*
2516 int use_64bit
= register_size (0) == 8;
2519 return &amd64_emit_ops
;
2522 return &i386_emit_ops
;
2525 /* This is initialized assuming an amd64 target.
2526 x86_arch_setup will correct it for i386 or amd64 targets. */
2528 struct linux_target_ops the_low_target
=
2544 x86_stopped_by_watchpoint
,
2545 x86_stopped_data_address
,
2546 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2547 native i386 case (no registers smaller than an xfer unit), and are not
2548 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2551 /* need to fix up i386 siginfo if host is amd64 */
2553 x86_linux_new_process
,
2554 x86_linux_new_thread
,
2555 x86_linux_prepare_to_resume
,
2556 x86_linux_process_qsupported
,
2557 x86_supports_tracepoints
,
2558 x86_get_thread_area
,
2559 x86_install_fast_tracepoint_jump_pad
,