1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
33 /* Defined in auto-generated file i386-linux.c. */
34 void init_registers_i386_linux (void);
35 /* Defined in auto-generated file amd64-linux.c. */
36 void init_registers_amd64_linux (void);
37 /* Defined in auto-generated file i386-avx-linux.c. */
38 void init_registers_i386_avx_linux (void);
39 /* Defined in auto-generated file amd64-avx-linux.c. */
40 void init_registers_amd64_avx_linux (void);
41 /* Defined in auto-generated file i386-mmx-linux.c. */
42 void init_registers_i386_mmx_linux (void);
/* A 5-byte relative jump (opcode 0xe9); the 32-bit displacement is
   patched in before the instruction is written to the inferior.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* A 4-byte relative jump with a 16-bit displacement (operand-size
   prefix 0x66 + 0xe9), used where only a short jump fits.  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  These are
   minimal hand-written target descriptions handed to GDB when it does
   not announce "xmlRegisters=" support.  NOTE(review): the closing
   "</target>" was missing from the corrupted source; restored here.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/ptrace.h>

/* Fallback values for ptrace requests that older kernel headers may
   lack.  NOTE(review): the closing #endif lines were missing from the
   corrupted source; restored here.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
93 /* Per-process arch-specific data we want to keep. */
95 struct arch_process_info
97 struct i386_debug_reg_state debug_reg_state
;
100 /* Per-thread arch-specific data we want to keep. */
104 /* Non-zero if our copy differs from what's recorded in the thread. */
105 int debug_registers_changed
;
110 /* Mapping between the general-purpose registers in `struct user'
111 format and GDB's register array layout.
112 Note that the transfer layout uses 64-bit regs. */
113 static /*const*/ int i386_regmap
[] =
115 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
116 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
117 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
118 DS
* 8, ES
* 8, FS
* 8, GS
* 8
121 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
123 /* So code below doesn't have to care, i386 or amd64. */
124 #define ORIG_EAX ORIG_RAX
126 static const int x86_64_regmap
[] =
128 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
129 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
130 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
131 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
132 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
133 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1,
137 -1, -1, -1, -1, -1, -1, -1, -1, -1,
141 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
143 #else /* ! __x86_64__ */
145 /* Mapping between the general-purpose registers in `struct user'
146 format and GDB's register array layout. */
147 static /*const*/ int i386_regmap
[] =
149 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
150 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
151 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
152 DS
* 4, ES
* 4, FS
* 4, GS
* 4
155 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
159 /* Called by libthread_db. */
162 ps_get_thread_area (const struct ps_prochandle
*ph
,
163 lwpid_t lwpid
, int idx
, void **base
)
166 int use_64bit
= register_size (0) == 8;
173 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
177 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
188 unsigned int desc
[4];
190 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
191 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
194 *(int *)base
= desc
[1];
199 /* Get the thread area address. This is used to recognize which
200 thread is which when tracing with the in-process agent library. We
201 don't read anything from the address, and treat it as opaque; it's
202 the address itself that we assume is unique per-thread. */
205 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
208 int use_64bit
= register_size (0) == 8;
213 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
215 *addr
= (CORE_ADDR
) (uintptr_t) base
;
224 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
225 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
226 unsigned int desc
[4];
228 const int reg_thread_area
= 3; /* bits to scale down register value. */
231 collect_register_by_name (regcache
, "gs", &gs
);
233 idx
= gs
>> reg_thread_area
;
235 if (ptrace (PTRACE_GET_THREAD_AREA
,
237 (void *) (long) idx
, (unsigned long) &desc
) < 0)
248 i386_cannot_store_register (int regno
)
250 return regno
>= I386_NUM_REGS
;
254 i386_cannot_fetch_register (int regno
)
256 return regno
>= I386_NUM_REGS
;
260 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
265 if (register_size (0) == 8)
267 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
268 if (x86_64_regmap
[i
] != -1)
269 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
274 for (i
= 0; i
< I386_NUM_REGS
; i
++)
275 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
277 collect_register_by_name (regcache
, "orig_eax",
278 ((char *) buf
) + ORIG_EAX
* 4);
282 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
287 if (register_size (0) == 8)
289 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
290 if (x86_64_regmap
[i
] != -1)
291 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
296 for (i
= 0; i
< I386_NUM_REGS
; i
++)
297 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
299 supply_register_by_name (regcache
, "orig_eax",
300 ((char *) buf
) + ORIG_EAX
* 4);
/* Convert REGCACHE's FP registers into BUF: fxsave layout on amd64,
   classic fsave layout on i386.  */
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Convert the FP register buffer BUF (fxsave layout on amd64, fsave
   layout on i386) into REGCACHE.  */
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* i386-only: convert REGCACHE's FP/SSE registers into an fxsave-layout
   buffer for PTRACE_SETFPXREGS.  */
#ifndef __x86_64__
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
#endif
/* i386-only: convert an fxsave-layout buffer (from PTRACE_GETFPXREGS)
   into REGCACHE.  */
#ifndef __x86_64__
static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
#endif
/* Convert REGCACHE into an XSAVE-layout buffer for PTRACE_SETREGSET
   with NT_X86_XSTATE.  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Convert an XSAVE-layout buffer (from PTRACE_GETREGSET with
   NT_X86_XSTATE) into REGCACHE.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
351 /* ??? The non-biarch i386 case stores all the i387 regs twice.
352 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
353 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
354 doesn't work. IWBN to avoid the duplication in the case where it
355 does work. Maybe the arch_setup routine could check whether it works
356 and update target_regsets accordingly, maybe by moving target_regsets
357 to linux_target_ops and set the right one there, rather than having to
358 modify the target_regsets global. */
360 struct regset_info target_regsets
[] =
362 #ifdef HAVE_PTRACE_GETREGS
363 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
365 x86_fill_gregset
, x86_store_gregset
},
366 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
367 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
369 # ifdef HAVE_PTRACE_GETFPXREGS
370 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
372 x86_fill_fpxregset
, x86_store_fpxregset
},
375 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
377 x86_fill_fpregset
, x86_store_fpregset
},
378 #endif /* HAVE_PTRACE_GETREGS */
379 { 0, 0, 0, -1, -1, NULL
, NULL
}
383 x86_get_pc (struct regcache
*regcache
)
385 int use_64bit
= register_size (0) == 8;
390 collect_register_by_name (regcache
, "rip", &pc
);
391 return (CORE_ADDR
) pc
;
396 collect_register_by_name (regcache
, "eip", &pc
);
397 return (CORE_ADDR
) pc
;
402 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
404 int use_64bit
= register_size (0) == 8;
408 unsigned long newpc
= pc
;
409 supply_register_by_name (regcache
, "rip", &newpc
);
413 unsigned int newpc
= pc
;
414 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3 (0xCC).  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
422 x86_breakpoint_at (CORE_ADDR pc
)
426 (*the_target
->read_memory
) (pc
, &c
, 1);
433 /* Support for debug registers. */
436 x86_linux_dr_get (ptid_t ptid
, int regnum
)
441 tid
= ptid_get_lwp (ptid
);
444 value
= ptrace (PTRACE_PEEKUSER
, tid
,
445 offsetof (struct user
, u_debugreg
[regnum
]), 0);
447 error ("Couldn't read debug register");
453 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
457 tid
= ptid_get_lwp (ptid
);
460 ptrace (PTRACE_POKEUSER
, tid
,
461 offsetof (struct user
, u_debugreg
[regnum
]), value
);
463 error ("Couldn't write debug register");
467 update_debug_registers_callback (struct inferior_list_entry
*entry
,
470 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
471 int pid
= *(int *) pid_p
;
473 /* Only update the threads of this process. */
474 if (pid_of (lwp
) == pid
)
476 /* The actual update is done later just before resuming the lwp,
477 we just mark that the registers need updating. */
478 lwp
->arch_private
->debug_registers_changed
= 1;
480 /* If the lwp isn't stopped, force it to momentarily pause, so
481 we can update its debug registers. */
483 linux_stop_lwp (lwp
);
489 /* Update the inferior's debug register REGNUM from STATE. */
492 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
494 /* Only update the threads of this process. */
495 int pid
= pid_of (get_thread_lwp (current_inferior
));
497 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
498 fatal ("Invalid debug register %d", regnum
);
500 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
503 /* Return the inferior's debug register REGNUM. */
506 i386_dr_low_get_addr (int regnum
)
508 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
509 ptid_t ptid
= ptid_of (lwp
);
511 /* DR6 and DR7 are retrieved with some other way. */
512 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
514 return x86_linux_dr_get (ptid
, regnum
);
517 /* Update the inferior's DR7 debug control register from STATE. */
520 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
522 /* Only update the threads of this process. */
523 int pid
= pid_of (get_thread_lwp (current_inferior
));
525 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
528 /* Return the inferior's DR7 debug control register. */
531 i386_dr_low_get_control (void)
533 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
534 ptid_t ptid
= ptid_of (lwp
);
536 return x86_linux_dr_get (ptid
, DR_CONTROL
);
539 /* Get the value of the DR6 debug status register from the inferior
540 and record it in STATE. */
543 i386_dr_low_get_status (void)
545 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
546 ptid_t ptid
= ptid_of (lwp
);
548 return x86_linux_dr_get (ptid
, DR_STATUS
);
551 /* Breakpoint/Watchpoint support. */
554 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
556 struct process_info
*proc
= current_process ();
563 ret
= prepare_to_access_memory ();
566 ret
= set_gdb_breakpoint_at (addr
);
567 done_accessing_memory ();
573 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
582 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
584 struct process_info
*proc
= current_process ();
591 ret
= prepare_to_access_memory ();
594 ret
= delete_gdb_breakpoint_at (addr
);
595 done_accessing_memory ();
601 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
610 x86_stopped_by_watchpoint (void)
612 struct process_info
*proc
= current_process ();
613 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
617 x86_stopped_data_address (void)
619 struct process_info
*proc
= current_process ();
621 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
627 /* Called when a new process is created. */
629 static struct arch_process_info
*
630 x86_linux_new_process (void)
632 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
634 i386_low_init_dregs (&info
->debug_reg_state
);
639 /* Called when a new thread is detected. */
641 static struct arch_lwp_info
*
642 x86_linux_new_thread (void)
644 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
646 info
->debug_registers_changed
= 1;
651 /* Called when resuming a thread.
652 If the debug regs have changed, update the thread's copies. */
655 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
657 ptid_t ptid
= ptid_of (lwp
);
659 if (lwp
->arch_private
->debug_registers_changed
)
662 int pid
= ptid_get_pid (ptid
);
663 struct process_info
*proc
= find_process_pid (pid
);
664 struct i386_debug_reg_state
*state
665 = &proc
->private->arch_private
->debug_reg_state
;
667 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
668 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
670 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
672 lwp
->arch_private
->debug_registers_changed
= 0;
675 if (lwp
->stopped_by_watchpoint
)
676 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad to the 128-byte kernel siginfo size (3 ints of header).  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
/* Shorthand accessors into compat_siginfo's _sifields union.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
790 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
792 memset (to
, 0, sizeof (*to
));
794 to
->si_signo
= from
->si_signo
;
795 to
->si_errno
= from
->si_errno
;
796 to
->si_code
= from
->si_code
;
798 if (to
->si_code
== SI_TIMER
)
800 to
->cpt_si_timerid
= from
->si_timerid
;
801 to
->cpt_si_overrun
= from
->si_overrun
;
802 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
804 else if (to
->si_code
== SI_USER
)
806 to
->cpt_si_pid
= from
->si_pid
;
807 to
->cpt_si_uid
= from
->si_uid
;
809 else if (to
->si_code
< 0)
811 to
->cpt_si_pid
= from
->si_pid
;
812 to
->cpt_si_uid
= from
->si_uid
;
813 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
817 switch (to
->si_signo
)
820 to
->cpt_si_pid
= from
->si_pid
;
821 to
->cpt_si_uid
= from
->si_uid
;
822 to
->cpt_si_status
= from
->si_status
;
823 to
->cpt_si_utime
= from
->si_utime
;
824 to
->cpt_si_stime
= from
->si_stime
;
830 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
833 to
->cpt_si_band
= from
->si_band
;
834 to
->cpt_si_fd
= from
->si_fd
;
837 to
->cpt_si_pid
= from
->si_pid
;
838 to
->cpt_si_uid
= from
->si_uid
;
839 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
846 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
848 memset (to
, 0, sizeof (*to
));
850 to
->si_signo
= from
->si_signo
;
851 to
->si_errno
= from
->si_errno
;
852 to
->si_code
= from
->si_code
;
854 if (to
->si_code
== SI_TIMER
)
856 to
->si_timerid
= from
->cpt_si_timerid
;
857 to
->si_overrun
= from
->cpt_si_overrun
;
858 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
860 else if (to
->si_code
== SI_USER
)
862 to
->si_pid
= from
->cpt_si_pid
;
863 to
->si_uid
= from
->cpt_si_uid
;
865 else if (to
->si_code
< 0)
867 to
->si_pid
= from
->cpt_si_pid
;
868 to
->si_uid
= from
->cpt_si_uid
;
869 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
873 switch (to
->si_signo
)
876 to
->si_pid
= from
->cpt_si_pid
;
877 to
->si_uid
= from
->cpt_si_uid
;
878 to
->si_status
= from
->cpt_si_status
;
879 to
->si_utime
= from
->cpt_si_utime
;
880 to
->si_stime
= from
->cpt_si_stime
;
886 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
889 to
->si_band
= from
->cpt_si_band
;
890 to
->si_fd
= from
->cpt_si_fd
;
893 to
->si_pid
= from
->cpt_si_pid
;
894 to
->si_uid
= from
->cpt_si_uid
;
895 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
901 #endif /* __x86_64__ */
903 /* Convert a native/host siginfo object, into/from the siginfo in the
904 layout of the inferiors' architecture. Returns true if any
905 conversion was done; false otherwise. If DIRECTION is 1, then copy
906 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
910 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
913 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
914 if (register_size (0) == 4)
916 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
917 fatal ("unexpected difference in siginfo");
920 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
922 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
933 /* Update gdbserver_xmltarget. */
936 x86_linux_update_xmltarget (void)
939 struct regset_info
*regset
;
940 static unsigned long long xcr0
;
941 static int have_ptrace_getregset
= -1;
942 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
943 static int have_ptrace_getfpxregs
= -1;
946 if (!current_inferior
)
949 /* Before changing the register cache internal layout or the target
950 regsets, flush the contents of the current valid caches back to
952 regcache_invalidate ();
954 pid
= pid_of (get_thread_lwp (current_inferior
));
956 if (num_xmm_registers
== 8)
957 init_registers_i386_linux ();
959 init_registers_amd64_linux ();
962 # ifdef HAVE_PTRACE_GETFPXREGS
963 if (have_ptrace_getfpxregs
== -1)
965 elf_fpxregset_t fpxregs
;
967 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
969 have_ptrace_getfpxregs
= 0;
970 x86_xcr0
= I386_XSTATE_X87_MASK
;
972 /* Disable PTRACE_GETFPXREGS. */
973 for (regset
= target_regsets
;
974 regset
->fill_function
!= NULL
; regset
++)
975 if (regset
->get_request
== PTRACE_GETFPXREGS
)
982 have_ptrace_getfpxregs
= 1;
985 if (!have_ptrace_getfpxregs
)
987 init_registers_i386_mmx_linux ();
991 init_registers_i386_linux ();
999 if (num_xmm_registers
== 8)
1000 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1002 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1004 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1007 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1012 /* Check if XSAVE extended state is supported. */
1013 if (have_ptrace_getregset
== -1)
1015 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1018 iov
.iov_base
= xstateregs
;
1019 iov
.iov_len
= sizeof (xstateregs
);
1021 /* Check if PTRACE_GETREGSET works. */
1022 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1025 have_ptrace_getregset
= 0;
1029 have_ptrace_getregset
= 1;
1031 /* Get XCR0 from XSAVE extended state at byte 464. */
1032 xcr0
= xstateregs
[464 / sizeof (long long)];
1034 /* Use PTRACE_GETREGSET if it is available. */
1035 for (regset
= target_regsets
;
1036 regset
->fill_function
!= NULL
; regset
++)
1037 if (regset
->get_request
== PTRACE_GETREGSET
)
1038 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1039 else if (regset
->type
!= GENERAL_REGS
)
1043 if (have_ptrace_getregset
)
1045 /* AVX is the highest feature we support. */
1046 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1051 /* I386 has 8 xmm regs. */
1052 if (num_xmm_registers
== 8)
1053 init_registers_i386_avx_linux ();
1055 init_registers_amd64_avx_linux ();
1057 init_registers_i386_avx_linux ();
1063 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1064 PTRACE_GETREGSET. */
1067 x86_linux_process_qsupported (const char *query
)
1069 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1070 with "i386" in qSupported query, it supports x86 XML target
1073 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1075 char *copy
= xstrdup (query
+ 13);
1078 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1080 if (strcmp (p
, "i386") == 0)
1090 x86_linux_update_xmltarget ();
1093 /* Initialize gdbserver for the architecture of the inferior. */
1096 x86_arch_setup (void)
1099 int pid
= pid_of (get_thread_lwp (current_inferior
));
1100 char *file
= linux_child_pid_to_exec_file (pid
);
1101 int use_64bit
= elf_64_file_p (file
);
1107 /* This can only happen if /proc/<pid>/exe is unreadable,
1108 but "that can't happen" if we've gotten this far.
1109 Fall through and assume this is a 32-bit program. */
1113 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1114 the_low_target
.num_regs
= -1;
1115 the_low_target
.regmap
= NULL
;
1116 the_low_target
.cannot_fetch_register
= NULL
;
1117 the_low_target
.cannot_store_register
= NULL
;
1119 /* Amd64 has 16 xmm regs. */
1120 num_xmm_registers
= 16;
1122 x86_linux_update_xmltarget ();
1127 /* Ok we have a 32-bit inferior. */
1129 the_low_target
.num_regs
= I386_NUM_REGS
;
1130 the_low_target
.regmap
= i386_regmap
;
1131 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1132 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1134 /* I386 has 8 xmm regs. */
1135 num_xmm_registers
= 8;
1137 x86_linux_update_xmltarget ();
/* This target supports fast tracepoints.  */
static int
x86_supports_tracepoints (void)
{
  return 1;
}
1147 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1149 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hex byte values (e.g.
   "48 89 e6"), into BUF.  Returns the number of bytes written.  */
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No more hex digits: done.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1175 /* Build a jump pad that saves registers and calls a collection
1176 function. Writes a jump instruction to the jump pad to
1177 JJUMPAD_INSN. The caller is responsible to write it in at the
1178 tracepoint address. */
1181 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1182 CORE_ADDR collector
,
1185 CORE_ADDR
*jump_entry
,
1186 CORE_ADDR
*trampoline
,
1187 ULONGEST
*trampoline_size
,
1188 unsigned char *jjump_pad_insn
,
1189 ULONGEST
*jjump_pad_insn_size
,
1190 CORE_ADDR
*adjusted_insn_addr
,
1191 CORE_ADDR
*adjusted_insn_addr_end
,
1194 unsigned char buf
[40];
1196 CORE_ADDR buildaddr
= *jump_entry
;
1198 /* Build the jump pad. */
1200 /* First, do tracepoint data collection. Save registers. */
1202 /* Need to ensure stack pointer saved first. */
1203 buf
[i
++] = 0x54; /* push %rsp */
1204 buf
[i
++] = 0x55; /* push %rbp */
1205 buf
[i
++] = 0x57; /* push %rdi */
1206 buf
[i
++] = 0x56; /* push %rsi */
1207 buf
[i
++] = 0x52; /* push %rdx */
1208 buf
[i
++] = 0x51; /* push %rcx */
1209 buf
[i
++] = 0x53; /* push %rbx */
1210 buf
[i
++] = 0x50; /* push %rax */
1211 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1212 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1213 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1214 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1215 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1216 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1217 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1218 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1219 buf
[i
++] = 0x9c; /* pushfq */
1220 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1222 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1223 i
+= sizeof (unsigned long);
1224 buf
[i
++] = 0x57; /* push %rdi */
1225 append_insns (&buildaddr
, i
, buf
);
1227 /* Stack space for the collecting_t object. */
1229 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1230 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1231 memcpy (buf
+ i
, &tpoint
, 8);
1233 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1234 i
+= push_opcode (&buf
[i
],
1235 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1236 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1237 append_insns (&buildaddr
, i
, buf
);
1241 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1242 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1244 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1245 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1246 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1247 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1248 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1249 append_insns (&buildaddr
, i
, buf
);
1251 /* Set up the gdb_collect call. */
1252 /* At this point, (stack pointer + 0x18) is the base of our saved
1256 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1257 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1259 /* tpoint address may be 64-bit wide. */
1260 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1261 memcpy (buf
+ i
, &tpoint
, 8);
1263 append_insns (&buildaddr
, i
, buf
);
1265 /* The collector function being in the shared library, may be
1266 >31-bits away off the jump pad. */
1268 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1269 memcpy (buf
+ i
, &collector
, 8);
1271 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1272 append_insns (&buildaddr
, i
, buf
);
1274 /* Clear the spin-lock. */
1276 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1277 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1278 memcpy (buf
+ i
, &lockaddr
, 8);
1280 append_insns (&buildaddr
, i
, buf
);
1282 /* Remove stack that had been used for the collect_t object. */
1284 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1285 append_insns (&buildaddr
, i
, buf
);
1287 /* Restore register state. */
1289 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1293 buf
[i
++] = 0x9d; /* popfq */
1294 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1295 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1296 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1297 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1298 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1299 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1300 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1301 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1302 buf
[i
++] = 0x58; /* pop %rax */
1303 buf
[i
++] = 0x5b; /* pop %rbx */
1304 buf
[i
++] = 0x59; /* pop %rcx */
1305 buf
[i
++] = 0x5a; /* pop %rdx */
1306 buf
[i
++] = 0x5e; /* pop %rsi */
1307 buf
[i
++] = 0x5f; /* pop %rdi */
1308 buf
[i
++] = 0x5d; /* pop %rbp */
1309 buf
[i
++] = 0x5c; /* pop %rsp */
1310 append_insns (&buildaddr
, i
, buf
);
1312 /* Now, adjust the original instruction to execute in the jump
1314 *adjusted_insn_addr
= buildaddr
;
1315 relocate_instruction (&buildaddr
, tpaddr
);
1316 *adjusted_insn_addr_end
= buildaddr
;
1318 /* Finally, write a jump back to the program. */
1319 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1320 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1321 memcpy (buf
+ 1, &offset
, 4);
1322 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1324 /* The jump pad is now built. Wire in a jump to our jump pad. This
1325 is always done last (by our caller actually), so that we can
1326 install fast tracepoints with threads running. This relies on
1327 the agent's atomic write support. */
1328 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1329 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1330 memcpy (buf
+ 1, &offset
, 4);
1331 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1332 *jjump_pad_insn_size
= sizeof (jump_insn
);
1334 /* Return the end address of our pad. */
1335 *jump_entry
= buildaddr
;
1340 #endif /* __x86_64__ */
1342 /* Build a jump pad that saves registers and calls a collection
1343 function. Writes a jump instruction to the jump pad to
1344 JJUMPAD_INSN. The caller is responsible to write it in at the
1345 tracepoint address. */
1348 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1349 CORE_ADDR collector
,
1352 CORE_ADDR
*jump_entry
,
1353 CORE_ADDR
*trampoline
,
1354 ULONGEST
*trampoline_size
,
1355 unsigned char *jjump_pad_insn
,
1356 ULONGEST
*jjump_pad_insn_size
,
1357 CORE_ADDR
*adjusted_insn_addr
,
1358 CORE_ADDR
*adjusted_insn_addr_end
,
1361 unsigned char buf
[0x100];
1363 CORE_ADDR buildaddr
= *jump_entry
;
1365 /* Build the jump pad. */
1367 /* First, do tracepoint data collection. Save registers. */
1369 buf
[i
++] = 0x60; /* pushad */
1370 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1371 *((int *)(buf
+ i
)) = (int) tpaddr
;
1373 buf
[i
++] = 0x9c; /* pushf */
1374 buf
[i
++] = 0x1e; /* push %ds */
1375 buf
[i
++] = 0x06; /* push %es */
1376 buf
[i
++] = 0x0f; /* push %fs */
1378 buf
[i
++] = 0x0f; /* push %gs */
1380 buf
[i
++] = 0x16; /* push %ss */
1381 buf
[i
++] = 0x0e; /* push %cs */
1382 append_insns (&buildaddr
, i
, buf
);
1384 /* Stack space for the collecting_t object. */
1386 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1388 /* Build the object. */
1389 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1390 memcpy (buf
+ i
, &tpoint
, 4);
1392 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1394 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1395 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1396 append_insns (&buildaddr
, i
, buf
);
1398 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1399 If we cared for it, this could be using xchg alternatively. */
1402 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1403 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1405 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1407 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1408 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1409 append_insns (&buildaddr
, i
, buf
);
1412 /* Set up arguments to the gdb_collect call. */
1414 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1415 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1416 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1417 append_insns (&buildaddr
, i
, buf
);
1420 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1421 append_insns (&buildaddr
, i
, buf
);
1424 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1425 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1427 append_insns (&buildaddr
, i
, buf
);
1429 buf
[0] = 0xe8; /* call <reladdr> */
1430 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1431 memcpy (buf
+ 1, &offset
, 4);
1432 append_insns (&buildaddr
, 5, buf
);
1433 /* Clean up after the call. */
1434 buf
[0] = 0x83; /* add $0x8,%esp */
1437 append_insns (&buildaddr
, 3, buf
);
1440 /* Clear the spin-lock. This would need the LOCK prefix on older
1443 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1444 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1445 memcpy (buf
+ i
, &lockaddr
, 4);
1447 append_insns (&buildaddr
, i
, buf
);
1450 /* Remove stack that had been used for the collect_t object. */
1452 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1453 append_insns (&buildaddr
, i
, buf
);
1456 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1459 buf
[i
++] = 0x17; /* pop %ss */
1460 buf
[i
++] = 0x0f; /* pop %gs */
1462 buf
[i
++] = 0x0f; /* pop %fs */
1464 buf
[i
++] = 0x07; /* pop %es */
1465 buf
[i
++] = 0x1f; /* pop %ds */
1466 buf
[i
++] = 0x9d; /* popf */
1467 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1470 buf
[i
++] = 0x61; /* popad */
1471 append_insns (&buildaddr
, i
, buf
);
1473 /* Now, adjust the original instruction to execute in the jump
1475 *adjusted_insn_addr
= buildaddr
;
1476 relocate_instruction (&buildaddr
, tpaddr
);
1477 *adjusted_insn_addr_end
= buildaddr
;
1479 /* Write the jump back to the program. */
1480 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1481 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1482 memcpy (buf
+ 1, &offset
, 4);
1483 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1485 /* The jump pad is now built. Wire in a jump to our jump pad. This
1486 is always done last (by our caller actually), so that we can
1487 install fast tracepoints with threads running. This relies on
1488 the agent's atomic write support. */
1491 /* Create a trampoline. */
1492 *trampoline_size
= sizeof (jump_insn
);
1493 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1495 /* No trampoline space available. */
1497 "E.Cannot allocate trampoline space needed for fast "
1498 "tracepoints on 4-byte instructions.");
1502 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1503 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1504 memcpy (buf
+ 1, &offset
, 4);
1505 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1507 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1508 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1509 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1510 memcpy (buf
+ 2, &offset
, 2);
1511 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1512 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1516 /* Else use a 32-bit relative jump instruction. */
1517 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1518 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1519 memcpy (buf
+ 1, &offset
, 4);
1520 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1521 *jjump_pad_insn_size
= sizeof (jump_insn
);
1524 /* Return the end address of our pad. */
1525 *jump_entry
= buildaddr
;
1531 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1532 CORE_ADDR collector
,
1535 CORE_ADDR
*jump_entry
,
1536 CORE_ADDR
*trampoline
,
1537 ULONGEST
*trampoline_size
,
1538 unsigned char *jjump_pad_insn
,
1539 ULONGEST
*jjump_pad_insn_size
,
1540 CORE_ADDR
*adjusted_insn_addr
,
1541 CORE_ADDR
*adjusted_insn_addr_end
,
1545 if (register_size (0) == 8)
1546 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1547 collector
, lockaddr
,
1548 orig_size
, jump_entry
,
1549 trampoline
, trampoline_size
,
1551 jjump_pad_insn_size
,
1553 adjusted_insn_addr_end
,
1557 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1558 collector
, lockaddr
,
1559 orig_size
, jump_entry
,
1560 trampoline
, trampoline_size
,
1562 jjump_pad_insn_size
,
1564 adjusted_insn_addr_end
,
1568 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1572 x86_get_min_fast_tracepoint_insn_len (void)
1574 static int warned_about_fast_tracepoints
= 0;
1577 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1578 used for fast tracepoints. */
1579 if (register_size (0) == 8)
1583 if (in_process_agent_loaded ())
1585 char errbuf
[IPA_BUFSIZ
];
1589 /* On x86, if trampolines are available, then 4-byte jump instructions
1590 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1591 with a 4-byte offset are used instead. */
1592 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1596 /* GDB has no channel to explain to user why a shorter fast
1597 tracepoint is not possible, but at least make GDBserver
1598 mention that something has gone awry. */
1599 if (!warned_about_fast_tracepoints
)
1601 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1602 warned_about_fast_tracepoints
= 1;
1609 /* Indicate that the minimum length is currently unknown since the IPA
1610 has not loaded yet. */
1616 add_insns (unsigned char *start
, int len
)
1618 CORE_ADDR buildaddr
= current_insn_ptr
;
1621 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1622 len
, paddress (buildaddr
));
1624 append_insns (&buildaddr
, len
, start
);
1625 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant: assemble the instruction block in .code32 mode so a
   64-bit-built gdbserver can emit code for a 32-bit inferior.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1669 amd64_emit_prologue (void)
1671 EMIT_ASM (amd64_prologue
,
1673 "movq %rsp,%rbp\n\t"
1674 "sub $0x20,%rsp\n\t"
1675 "movq %rdi,-8(%rbp)\n\t"
1676 "movq %rsi,-16(%rbp)");
1681 amd64_emit_epilogue (void)
1683 EMIT_ASM (amd64_epilogue
,
1684 "movq -16(%rbp),%rdi\n\t"
1685 "movq %rax,(%rdi)\n\t"
1692 amd64_emit_add (void)
1694 EMIT_ASM (amd64_add
,
1695 "add (%rsp),%rax\n\t"
1696 "lea 0x8(%rsp),%rsp");
1700 amd64_emit_sub (void)
1702 EMIT_ASM (amd64_sub
,
1703 "sub %rax,(%rsp)\n\t"
1708 amd64_emit_mul (void)
1714 amd64_emit_lsh (void)
1720 amd64_emit_rsh_signed (void)
1726 amd64_emit_rsh_unsigned (void)
1732 amd64_emit_ext (int arg
)
1737 EMIT_ASM (amd64_ext_8
,
1743 EMIT_ASM (amd64_ext_16
,
1748 EMIT_ASM (amd64_ext_32
,
1757 amd64_emit_log_not (void)
1759 EMIT_ASM (amd64_log_not
,
1760 "test %rax,%rax\n\t"
1766 amd64_emit_bit_and (void)
1768 EMIT_ASM (amd64_and
,
1769 "and (%rsp),%rax\n\t"
1770 "lea 0x8(%rsp),%rsp");
1774 amd64_emit_bit_or (void)
1777 "or (%rsp),%rax\n\t"
1778 "lea 0x8(%rsp),%rsp");
1782 amd64_emit_bit_xor (void)
1784 EMIT_ASM (amd64_xor
,
1785 "xor (%rsp),%rax\n\t"
1786 "lea 0x8(%rsp),%rsp");
1790 amd64_emit_bit_not (void)
1792 EMIT_ASM (amd64_bit_not
,
1793 "xorq $0xffffffffffffffff,%rax");
1797 amd64_emit_equal (void)
1799 EMIT_ASM (amd64_equal
,
1800 "cmp %rax,(%rsp)\n\t"
1801 "je .Lamd64_equal_true\n\t"
1803 "jmp .Lamd64_equal_end\n\t"
1804 ".Lamd64_equal_true:\n\t"
1806 ".Lamd64_equal_end:\n\t"
1807 "lea 0x8(%rsp),%rsp");
1811 amd64_emit_less_signed (void)
1813 EMIT_ASM (amd64_less_signed
,
1814 "cmp %rax,(%rsp)\n\t"
1815 "jl .Lamd64_less_signed_true\n\t"
1817 "jmp .Lamd64_less_signed_end\n\t"
1818 ".Lamd64_less_signed_true:\n\t"
1820 ".Lamd64_less_signed_end:\n\t"
1821 "lea 0x8(%rsp),%rsp");
1825 amd64_emit_less_unsigned (void)
1827 EMIT_ASM (amd64_less_unsigned
,
1828 "cmp %rax,(%rsp)\n\t"
1829 "jb .Lamd64_less_unsigned_true\n\t"
1831 "jmp .Lamd64_less_unsigned_end\n\t"
1832 ".Lamd64_less_unsigned_true:\n\t"
1834 ".Lamd64_less_unsigned_end:\n\t"
1835 "lea 0x8(%rsp),%rsp");
1839 amd64_emit_ref (int size
)
1844 EMIT_ASM (amd64_ref1
,
1848 EMIT_ASM (amd64_ref2
,
1852 EMIT_ASM (amd64_ref4
,
1853 "movl (%rax),%eax");
1856 EMIT_ASM (amd64_ref8
,
1857 "movq (%rax),%rax");
1863 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1865 EMIT_ASM (amd64_if_goto
,
1869 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1877 amd64_emit_goto (int *offset_p
, int *size_p
)
1879 EMIT_ASM (amd64_goto
,
1880 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1888 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1890 int diff
= (to
- (from
+ size
));
1891 unsigned char buf
[sizeof (int)];
1899 memcpy (buf
, &diff
, sizeof (int));
1900 write_inferior_memory (from
, buf
, sizeof (int));
1904 amd64_emit_const (LONGEST num
)
1906 unsigned char buf
[16];
1908 CORE_ADDR buildaddr
= current_insn_ptr
;
1911 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1912 memcpy (&buf
[i
], &num
, sizeof (num
));
1914 append_insns (&buildaddr
, i
, buf
);
1915 current_insn_ptr
= buildaddr
;
1919 amd64_emit_call (CORE_ADDR fn
)
1921 unsigned char buf
[16];
1923 CORE_ADDR buildaddr
;
1926 /* The destination function being in the shared library, may be
1927 >31-bits away off the compiled code pad. */
1929 buildaddr
= current_insn_ptr
;
1931 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1935 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1937 /* Offset is too large for a call. Use callq, but that requires
1938 a register, so avoid it if possible. Use r10, since it is
1939 call-clobbered, we don't have to push/pop it. */
1940 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1942 memcpy (buf
+ i
, &fn
, 8);
1944 buf
[i
++] = 0xff; /* callq *%r10 */
1949 int offset32
= offset64
; /* we know we can't overflow here. */
1950 memcpy (buf
+ i
, &offset32
, 4);
1954 append_insns (&buildaddr
, i
, buf
);
1955 current_insn_ptr
= buildaddr
;
1959 amd64_emit_reg (int reg
)
1961 unsigned char buf
[16];
1963 CORE_ADDR buildaddr
;
1965 /* Assume raw_regs is still in %rdi. */
1966 buildaddr
= current_insn_ptr
;
1968 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1969 memcpy (&buf
[i
], ®
, sizeof (reg
));
1971 append_insns (&buildaddr
, i
, buf
);
1972 current_insn_ptr
= buildaddr
;
1973 amd64_emit_call (get_raw_reg_func_addr ());
1977 amd64_emit_pop (void)
1979 EMIT_ASM (amd64_pop
,
1984 amd64_emit_stack_flush (void)
1986 EMIT_ASM (amd64_stack_flush
,
1991 amd64_emit_zero_ext (int arg
)
1996 EMIT_ASM (amd64_zero_ext_8
,
2000 EMIT_ASM (amd64_zero_ext_16
,
2001 "and $0xffff,%rax");
2004 EMIT_ASM (amd64_zero_ext_32
,
2005 "mov $0xffffffff,%rcx\n\t"
2014 amd64_emit_swap (void)
2016 EMIT_ASM (amd64_swap
,
2023 amd64_emit_stack_adjust (int n
)
2025 unsigned char buf
[16];
2027 CORE_ADDR buildaddr
= current_insn_ptr
;
2030 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2034 /* This only handles adjustments up to 16, but we don't expect any more. */
2036 append_insns (&buildaddr
, i
, buf
);
2037 current_insn_ptr
= buildaddr
;
2040 /* FN's prototype is `LONGEST(*fn)(int)'. */
2043 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2045 unsigned char buf
[16];
2047 CORE_ADDR buildaddr
;
2049 buildaddr
= current_insn_ptr
;
2051 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2052 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2054 append_insns (&buildaddr
, i
, buf
);
2055 current_insn_ptr
= buildaddr
;
2056 amd64_emit_call (fn
);
2059 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2062 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2064 unsigned char buf
[16];
2066 CORE_ADDR buildaddr
;
2068 buildaddr
= current_insn_ptr
;
2070 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2071 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2073 append_insns (&buildaddr
, i
, buf
);
2074 current_insn_ptr
= buildaddr
;
2075 EMIT_ASM (amd64_void_call_2_a
,
2076 /* Save away a copy of the stack top. */
2078 /* Also pass top as the second argument. */
2080 amd64_emit_call (fn
);
2081 EMIT_ASM (amd64_void_call_2_b
,
2082 /* Restore the stack top, %rax may have been trashed. */
2087 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2090 "cmp %rax,(%rsp)\n\t"
2091 "jne .Lamd64_eq_fallthru\n\t"
2092 "lea 0x8(%rsp),%rsp\n\t"
2094 /* jmp, but don't trust the assembler to choose the right jump */
2095 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2096 ".Lamd64_eq_fallthru:\n\t"
2097 "lea 0x8(%rsp),%rsp\n\t"
2107 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2110 "cmp %rax,(%rsp)\n\t"
2111 "je .Lamd64_ne_fallthru\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2114 /* jmp, but don't trust the assembler to choose the right jump */
2115 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2116 ".Lamd64_ne_fallthru:\n\t"
2117 "lea 0x8(%rsp),%rsp\n\t"
2127 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2130 "cmp %rax,(%rsp)\n\t"
2131 "jnl .Lamd64_lt_fallthru\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2134 /* jmp, but don't trust the assembler to choose the right jump */
2135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2136 ".Lamd64_lt_fallthru:\n\t"
2137 "lea 0x8(%rsp),%rsp\n\t"
2147 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2150 "cmp %rax,(%rsp)\n\t"
2151 "jnle .Lamd64_le_fallthru\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2154 /* jmp, but don't trust the assembler to choose the right jump */
2155 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2156 ".Lamd64_le_fallthru:\n\t"
2157 "lea 0x8(%rsp),%rsp\n\t"
2167 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2170 "cmp %rax,(%rsp)\n\t"
2171 "jng .Lamd64_gt_fallthru\n\t"
2172 "lea 0x8(%rsp),%rsp\n\t"
2174 /* jmp, but don't trust the assembler to choose the right jump */
2175 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2176 ".Lamd64_gt_fallthru:\n\t"
2177 "lea 0x8(%rsp),%rsp\n\t"
2187 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2190 "cmp %rax,(%rsp)\n\t"
2191 "jnge .Lamd64_ge_fallthru\n\t"
2192 ".Lamd64_ge_jump:\n\t"
2193 "lea 0x8(%rsp),%rsp\n\t"
2195 /* jmp, but don't trust the assembler to choose the right jump */
2196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2197 ".Lamd64_ge_fallthru:\n\t"
2198 "lea 0x8(%rsp),%rsp\n\t"
2207 struct emit_ops amd64_emit_ops
=
2209 amd64_emit_prologue
,
2210 amd64_emit_epilogue
,
2215 amd64_emit_rsh_signed
,
2216 amd64_emit_rsh_unsigned
,
2224 amd64_emit_less_signed
,
2225 amd64_emit_less_unsigned
,
2229 amd64_write_goto_address
,
2234 amd64_emit_stack_flush
,
2235 amd64_emit_zero_ext
,
2237 amd64_emit_stack_adjust
,
2238 amd64_emit_int_call_1
,
2239 amd64_emit_void_call_2
,
2248 #endif /* __x86_64__ */
2251 i386_emit_prologue (void)
2253 EMIT_ASM32 (i386_prologue
,
2257 /* At this point, the raw regs base address is at 8(%ebp), and the
2258 value pointer is at 12(%ebp). */
2262 i386_emit_epilogue (void)
2264 EMIT_ASM32 (i386_epilogue
,
2265 "mov 12(%ebp),%ecx\n\t"
2266 "mov %eax,(%ecx)\n\t"
2267 "mov %ebx,0x4(%ecx)\n\t"
2275 i386_emit_add (void)
2277 EMIT_ASM32 (i386_add
,
2278 "add (%esp),%eax\n\t"
2279 "adc 0x4(%esp),%ebx\n\t"
2280 "lea 0x8(%esp),%esp");
2284 i386_emit_sub (void)
2286 EMIT_ASM32 (i386_sub
,
2287 "subl %eax,(%esp)\n\t"
2288 "sbbl %ebx,4(%esp)\n\t"
2294 i386_emit_mul (void)
2300 i386_emit_lsh (void)
2306 i386_emit_rsh_signed (void)
2312 i386_emit_rsh_unsigned (void)
2318 i386_emit_ext (int arg
)
2323 EMIT_ASM32 (i386_ext_8
,
2326 "movl %eax,%ebx\n\t"
2330 EMIT_ASM32 (i386_ext_16
,
2332 "movl %eax,%ebx\n\t"
2336 EMIT_ASM32 (i386_ext_32
,
2337 "movl %eax,%ebx\n\t"
2346 i386_emit_log_not (void)
2348 EMIT_ASM32 (i386_log_not
,
2350 "test %eax,%eax\n\t"
2357 i386_emit_bit_and (void)
2359 EMIT_ASM32 (i386_and
,
2360 "and (%esp),%eax\n\t"
2361 "and 0x4(%esp),%ebx\n\t"
2362 "lea 0x8(%esp),%esp");
2366 i386_emit_bit_or (void)
2368 EMIT_ASM32 (i386_or
,
2369 "or (%esp),%eax\n\t"
2370 "or 0x4(%esp),%ebx\n\t"
2371 "lea 0x8(%esp),%esp");
2375 i386_emit_bit_xor (void)
2377 EMIT_ASM32 (i386_xor
,
2378 "xor (%esp),%eax\n\t"
2379 "xor 0x4(%esp),%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2384 i386_emit_bit_not (void)
2386 EMIT_ASM32 (i386_bit_not
,
2387 "xor $0xffffffff,%eax\n\t"
2388 "xor $0xffffffff,%ebx\n\t");
2392 i386_emit_equal (void)
2394 EMIT_ASM32 (i386_equal
,
2395 "cmpl %ebx,4(%esp)\n\t"
2396 "jne .Li386_equal_false\n\t"
2397 "cmpl %eax,(%esp)\n\t"
2398 "je .Li386_equal_true\n\t"
2399 ".Li386_equal_false:\n\t"
2401 "jmp .Li386_equal_end\n\t"
2402 ".Li386_equal_true:\n\t"
2404 ".Li386_equal_end:\n\t"
2406 "lea 0x8(%esp),%esp");
2410 i386_emit_less_signed (void)
2412 EMIT_ASM32 (i386_less_signed
,
2413 "cmpl %ebx,4(%esp)\n\t"
2414 "jl .Li386_less_signed_true\n\t"
2415 "jne .Li386_less_signed_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "jl .Li386_less_signed_true\n\t"
2418 ".Li386_less_signed_false:\n\t"
2420 "jmp .Li386_less_signed_end\n\t"
2421 ".Li386_less_signed_true:\n\t"
2423 ".Li386_less_signed_end:\n\t"
2425 "lea 0x8(%esp),%esp");
2429 i386_emit_less_unsigned (void)
2431 EMIT_ASM32 (i386_less_unsigned
,
2432 "cmpl %ebx,4(%esp)\n\t"
2433 "jb .Li386_less_unsigned_true\n\t"
2434 "jne .Li386_less_unsigned_false\n\t"
2435 "cmpl %eax,(%esp)\n\t"
2436 "jb .Li386_less_unsigned_true\n\t"
2437 ".Li386_less_unsigned_false:\n\t"
2439 "jmp .Li386_less_unsigned_end\n\t"
2440 ".Li386_less_unsigned_true:\n\t"
2442 ".Li386_less_unsigned_end:\n\t"
2444 "lea 0x8(%esp),%esp");
2448 i386_emit_ref (int size
)
2453 EMIT_ASM32 (i386_ref1
,
2457 EMIT_ASM32 (i386_ref2
,
2461 EMIT_ASM32 (i386_ref4
,
2462 "movl (%eax),%eax");
2465 EMIT_ASM32 (i386_ref8
,
2466 "movl 4(%eax),%ebx\n\t"
2467 "movl (%eax),%eax");
2473 i386_emit_if_goto (int *offset_p
, int *size_p
)
2475 EMIT_ASM32 (i386_if_goto
,
2481 /* Don't trust the assembler to choose the right jump */
2482 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2485 *offset_p
= 11; /* be sure that this matches the sequence above */
2491 i386_emit_goto (int *offset_p
, int *size_p
)
2493 EMIT_ASM32 (i386_goto
,
2494 /* Don't trust the assembler to choose the right jump */
2495 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2503 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2505 int diff
= (to
- (from
+ size
));
2506 unsigned char buf
[sizeof (int)];
2508 /* We're only doing 4-byte sizes at the moment. */
2515 memcpy (buf
, &diff
, sizeof (int));
2516 write_inferior_memory (from
, buf
, sizeof (int));
2520 i386_emit_const (LONGEST num
)
2522 unsigned char buf
[16];
2524 CORE_ADDR buildaddr
= current_insn_ptr
;
2527 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2528 lo
= num
& 0xffffffff;
2529 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2531 hi
= ((num
>> 32) & 0xffffffff);
2534 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2535 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2540 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2542 append_insns (&buildaddr
, i
, buf
);
2543 current_insn_ptr
= buildaddr
;
2547 i386_emit_call (CORE_ADDR fn
)
2549 unsigned char buf
[16];
2551 CORE_ADDR buildaddr
;
2553 buildaddr
= current_insn_ptr
;
2555 buf
[i
++] = 0xe8; /* call <reladdr> */
2556 offset
= ((int) fn
) - (buildaddr
+ 5);
2557 memcpy (buf
+ 1, &offset
, 4);
2558 append_insns (&buildaddr
, 5, buf
);
2559 current_insn_ptr
= buildaddr
;
2563 i386_emit_reg (int reg
)
2565 unsigned char buf
[16];
2567 CORE_ADDR buildaddr
;
2569 EMIT_ASM32 (i386_reg_a
,
2571 buildaddr
= current_insn_ptr
;
2573 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2574 memcpy (&buf
[i
], ®
, sizeof (reg
));
2576 append_insns (&buildaddr
, i
, buf
);
2577 current_insn_ptr
= buildaddr
;
2578 EMIT_ASM32 (i386_reg_b
,
2579 "mov %eax,4(%esp)\n\t"
2580 "mov 8(%ebp),%eax\n\t"
2582 i386_emit_call (get_raw_reg_func_addr ());
2583 EMIT_ASM32 (i386_reg_c
,
2585 "lea 0x8(%esp),%esp");
2589 i386_emit_pop (void)
2591 EMIT_ASM32 (i386_pop
,
2597 i386_emit_stack_flush (void)
2599 EMIT_ASM32 (i386_stack_flush
,
2605 i386_emit_zero_ext (int arg
)
2610 EMIT_ASM32 (i386_zero_ext_8
,
2611 "and $0xff,%eax\n\t"
2615 EMIT_ASM32 (i386_zero_ext_16
,
2616 "and $0xffff,%eax\n\t"
2620 EMIT_ASM32 (i386_zero_ext_32
,
2629 i386_emit_swap (void)
2631 EMIT_ASM32 (i386_swap
,
2641 i386_emit_stack_adjust (int n
)
2643 unsigned char buf
[16];
2645 CORE_ADDR buildaddr
= current_insn_ptr
;
2648 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2652 append_insns (&buildaddr
, i
, buf
);
2653 current_insn_ptr
= buildaddr
;
2656 /* FN's prototype is `LONGEST(*fn)(int)'. */
2659 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2661 unsigned char buf
[16];
2663 CORE_ADDR buildaddr
;
2665 EMIT_ASM32 (i386_int_call_1_a
,
2666 /* Reserve a bit of stack space. */
2668 /* Put the one argument on the stack. */
2669 buildaddr
= current_insn_ptr
;
2671 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2674 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2676 append_insns (&buildaddr
, i
, buf
);
2677 current_insn_ptr
= buildaddr
;
2678 i386_emit_call (fn
);
2679 EMIT_ASM32 (i386_int_call_1_c
,
2681 "lea 0x8(%esp),%esp");
2684 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2687 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2689 unsigned char buf
[16];
2691 CORE_ADDR buildaddr
;
2693 EMIT_ASM32 (i386_void_call_2_a
,
2694 /* Preserve %eax only; we don't have to worry about %ebx. */
2696 /* Reserve a bit of stack space for arguments. */
2697 "sub $0x10,%esp\n\t"
2698 /* Copy "top" to the second argument position. (Note that
2699 we can't assume function won't scribble on its
2700 arguments, so don't try to restore from this.) */
2701 "mov %eax,4(%esp)\n\t"
2702 "mov %ebx,8(%esp)");
2703 /* Put the first argument on the stack. */
2704 buildaddr
= current_insn_ptr
;
2706 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2709 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2711 append_insns (&buildaddr
, i
, buf
);
2712 current_insn_ptr
= buildaddr
;
2713 i386_emit_call (fn
);
2714 EMIT_ASM32 (i386_void_call_2_b
,
2715 "lea 0x10(%esp),%esp\n\t"
2716 /* Restore original stack top. */
2722 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2725 /* Check low half first, more likely to be decider */
2726 "cmpl %eax,(%esp)\n\t"
2727 "jne .Leq_fallthru\n\t"
2728 "cmpl %ebx,4(%esp)\n\t"
2729 "jne .Leq_fallthru\n\t"
2730 "lea 0x8(%esp),%esp\n\t"
2733 /* jmp, but don't trust the assembler to choose the right jump */
2734 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2735 ".Leq_fallthru:\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2747 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2750 /* Check low half first, more likely to be decider */
2751 "cmpl %eax,(%esp)\n\t"
2753 "cmpl %ebx,4(%esp)\n\t"
2754 "je .Lne_fallthru\n\t"
2756 "lea 0x8(%esp),%esp\n\t"
2759 /* jmp, but don't trust the assembler to choose the right jump */
2760 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2761 ".Lne_fallthru:\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2773 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2776 "cmpl %ebx,4(%esp)\n\t"
2778 "jne .Llt_fallthru\n\t"
2779 "cmpl %eax,(%esp)\n\t"
2780 "jnl .Llt_fallthru\n\t"
2782 "lea 0x8(%esp),%esp\n\t"
2785 /* jmp, but don't trust the assembler to choose the right jump */
2786 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2787 ".Llt_fallthru:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2799 i386_emit_le_goto (int *offset_p
, int *size_p
)
2802 "cmpl %ebx,4(%esp)\n\t"
2804 "jne .Lle_fallthru\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "jnle .Lle_fallthru\n\t"
2808 "lea 0x8(%esp),%esp\n\t"
2811 /* jmp, but don't trust the assembler to choose the right jump */
2812 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2813 ".Lle_fallthru:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2825 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2828 "cmpl %ebx,4(%esp)\n\t"
2830 "jne .Lgt_fallthru\n\t"
2831 "cmpl %eax,(%esp)\n\t"
2832 "jng .Lgt_fallthru\n\t"
2834 "lea 0x8(%esp),%esp\n\t"
2837 /* jmp, but don't trust the assembler to choose the right jump */
2838 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2839 ".Lgt_fallthru:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2851 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2854 "cmpl %ebx,4(%esp)\n\t"
2856 "jne .Lge_fallthru\n\t"
2857 "cmpl %eax,(%esp)\n\t"
2858 "jnge .Lge_fallthru\n\t"
2860 "lea 0x8(%esp),%esp\n\t"
2863 /* jmp, but don't trust the assembler to choose the right jump */
2864 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2865 ".Lge_fallthru:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2876 struct emit_ops i386_emit_ops
=
2884 i386_emit_rsh_signed
,
2885 i386_emit_rsh_unsigned
,
2893 i386_emit_less_signed
,
2894 i386_emit_less_unsigned
,
2898 i386_write_goto_address
,
2903 i386_emit_stack_flush
,
2906 i386_emit_stack_adjust
,
2907 i386_emit_int_call_1
,
2908 i386_emit_void_call_2
,
2918 static struct emit_ops
*
2922 int use_64bit
= register_size (0) == 8;
2925 return &amd64_emit_ops
;
2928 return &i386_emit_ops
;
2931 /* This is initialized assuming an amd64 target.
2932 x86_arch_setup will correct it for i386 or amd64 targets. */
2934 struct linux_target_ops the_low_target
=
2950 x86_stopped_by_watchpoint
,
2951 x86_stopped_data_address
,
2952 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2953 native i386 case (no registers smaller than an xfer unit), and are not
2954 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2957 /* need to fix up i386 siginfo if host is amd64 */
2959 x86_linux_new_process
,
2960 x86_linux_new_thread
,
2961 x86_linux_prepare_to_resume
,
2962 x86_linux_process_qsupported
,
2963 x86_supports_tracepoints
,
2964 x86_get_thread_area
,
2965 x86_install_fast_tracepoint_jump_pad
,
2967 x86_get_min_fast_tracepoint_insn_len
,