/* GNU/Linux/x86-64 specific low level interface, for the remote server
for GDB.
- Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
- Free Software Foundation, Inc.
+ Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.
This file is part of GDB.
#include <stddef.h>
#include <signal.h>
+#include <limits.h>
+#include <inttypes.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "elf/common.h"
#include "gdb_proc_service.h"
+#include "agent.h"
/* Defined in auto-generated file i386-linux.c. */
void init_registers_i386_linux (void);
void init_registers_i386_mmx_linux (void);
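+/* A 5-byte direct jump: opcode 0xe9 followed by a 32-bit relative
+ displacement. */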
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
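+/* A 4-byte jump: the 0x66 operand-size prefix turns 0xe9 into a jump
+ with a 16-bit displacement, which (in 32-bit mode) also truncates
+ %eip to 16 bits, so the trampoline it targets must sit in the lowest
+ 64K of the address space. */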
+static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support. */
idx = gs >> reg_thread_area;
if (ptrace (PTRACE_GET_THREAD_AREA,
- lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
+ lwpid_of (lwp),
+ (void *) (long) idx, (unsigned long) &desc) < 0)
return -1;
*addr = desc[1];
error ("Couldn't write debug register");
}
+static int
+update_debug_registers_callback (struct inferior_list_entry *entry,
+ void *pid_p)
+{
+ struct lwp_info *lwp = (struct lwp_info *) entry;
+ int pid = *(int *) pid_p;
+
+ /* Only update the threads of this process. */
+ if (pid_of (lwp) == pid)
+ {
+ /* The actual update is done later, just before resuming the lwp;
+ here we just mark that the registers need updating. */
+ lwp->arch_private->debug_registers_changed = 1;
+
+ /* If the lwp isn't stopped, force it to momentarily pause, so
+ we can update its debug registers. */
+ if (!lwp->stopped)
+ linux_stop_lwp (lwp);
+ }
+
+ return 0;
+}
+
/* Update the inferior's debug register REGNUM from STATE. */
void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
- struct inferior_list_entry *lp;
- CORE_ADDR addr;
- /* Only need to update the threads of this process. */
+ /* Only update the threads of this process. */
int pid = pid_of (get_thread_lwp (current_inferior));
if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
fatal ("Invalid debug register %d", regnum);
- addr = state->dr_mirror[regnum];
+ find_inferior (&all_lwps, update_debug_registers_callback, &pid);
+}
- for (lp = all_lwps.head; lp; lp = lp->next)
- {
- struct lwp_info *lwp = (struct lwp_info *) lp;
+/* Return the inferior's debug register REGNUM. */
- /* The actual update is done later, we just mark that the register
- needs updating. */
- if (pid_of (lwp) == pid)
- lwp->arch_private->debug_registers_changed = 1;
- }
+CORE_ADDR
+i386_dr_low_get_addr (int regnum)
+{
+ struct lwp_info *lwp = get_thread_lwp (current_inferior);
+ ptid_t ptid = ptid_of (lwp);
+
+ /* DR6 and DR7 are retrieved by other means. */
+ gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
+
+ return x86_linux_dr_get (ptid, regnum);
}
/* Update the inferior's DR7 debug control register from STATE. */
void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
- struct inferior_list_entry *lp;
- /* Only need to update the threads of this process. */
+ /* Only update the threads of this process. */
int pid = pid_of (get_thread_lwp (current_inferior));
- for (lp = all_lwps.head; lp; lp = lp->next)
- {
- struct lwp_info *lwp = (struct lwp_info *) lp;
+ find_inferior (&all_lwps, update_debug_registers_callback, &pid);
+}
- /* The actual update is done later, we just mark that the register
- needs updating. */
- if (pid_of (lwp) == pid)
- lwp->arch_private->debug_registers_changed = 1;
- }
+/* Return the inferior's DR7 debug control register. */
+
+unsigned
+i386_dr_low_get_control (void)
+{
+ struct lwp_info *lwp = get_thread_lwp (current_inferior);
+ ptid_t ptid = ptid_of (lwp);
+
+ return x86_linux_dr_get (ptid, DR_CONTROL);
}
/* Get the value of the DR6 debug status register from the inferior
and record it in STATE. */
-void
-i386_dr_low_get_status (struct i386_debug_reg_state *state)
+unsigned
+i386_dr_low_get_status (void)
{
struct lwp_info *lwp = get_thread_lwp (current_inferior);
ptid_t ptid = ptid_of (lwp);
- state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
+ return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
-/* Watchpoint support. */
+/* Breakpoint/Watchpoint support. */
static int
x86_insert_point (char type, CORE_ADDR addr, int len)
switch (type)
{
case '0':
- return set_gdb_breakpoint_at (addr);
+ {
+ int ret;
+
+ ret = prepare_to_access_memory ();
+ if (ret)
+ return -1;
+ ret = set_gdb_breakpoint_at (addr);
+ done_accessing_memory ();
+ return ret;
+ }
case '2':
case '3':
case '4':
switch (type)
{
case '0':
- return delete_gdb_breakpoint_at (addr);
+ {
+ int ret;
+
+ ret = prepare_to_access_memory ();
+ if (ret)
+ return -1;
+ ret = delete_gdb_breakpoint_at (addr);
+ done_accessing_memory ();
+ return ret;
+ }
case '2':
case '3':
case '4':
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
ptid_t ptid = ptid_of (lwp);
+ int clear_status = 0;
if (lwp->arch_private->debug_registers_changed)
{
int i;
int pid = ptid_get_pid (ptid);
struct process_info *proc = find_process_pid (pid);
- struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;
+ struct i386_debug_reg_state *state
+ = &proc->private->arch_private->debug_reg_state;
for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
- x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
+ if (state->dr_ref_count[i] > 0)
+ {
+ x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
+
+ /* If we're setting a watchpoint, any change the inferior
+ had made itself to the debug registers needs to be
+ discarded; otherwise, i386_low_stopped_data_address can
+ get confused. */
+ clear_status = 1;
+ }
x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
lwp->arch_private->debug_registers_changed = 0;
}
- if (lwp->stopped_by_watchpoint)
+ if (clear_status || lwp->stopped_by_watchpoint)
x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
} _sifields;
} compat_siginfo_t;
+/* For x32, the 64-bit clock_t fields in _sigchld are aligned to only
+ 4 bytes. */
+typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
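+/* With the 4-byte alignment above, the 64-bit _utime and _stime fields
+ in _sigchld below land at the offsets the x32 kernel ABI expects,
+ rather than the 8-byte-aligned offsets a plain 'long' would get. */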
+
+typedef struct compat_x32_siginfo
+{
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union
+ {
+ int _pad[((128 / sizeof (int)) - 3)];
+
+ /* kill() */
+ struct
+ {
+ unsigned int _pid;
+ unsigned int _uid;
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct
+ {
+ compat_timer_t _tid;
+ int _overrun;
+ compat_sigval_t _sigval;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct
+ {
+ unsigned int _pid;
+ unsigned int _uid;
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct
+ {
+ unsigned int _pid;
+ unsigned int _uid;
+ int _status;
+ compat_x32_clock_t _utime;
+ compat_x32_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct
+ {
+ unsigned int _addr;
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct
+ {
+ int _band;
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
+
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
to->si_errno = from->si_errno;
to->si_code = from->si_code;
- if (to->si_code < 0)
+ if (to->si_code == SI_TIMER)
{
+ to->cpt_si_timerid = from->si_timerid;
+ to->cpt_si_overrun = from->si_overrun;
to->cpt_si_ptr = (intptr_t) from->si_ptr;
}
else if (to->si_code == SI_USER)
to->cpt_si_pid = from->si_pid;
to->cpt_si_uid = from->si_uid;
}
- else if (to->si_code == SI_TIMER)
+ else if (to->si_code < 0)
{
- to->cpt_si_timerid = from->si_timerid;
- to->cpt_si_overrun = from->si_overrun;
+ to->cpt_si_pid = from->si_pid;
+ to->cpt_si_uid = from->si_uid;
to->cpt_si_ptr = (intptr_t) from->si_ptr;
}
else
to->si_errno = from->si_errno;
to->si_code = from->si_code;
- if (to->si_code < 0)
+ if (to->si_code == SI_TIMER)
{
+ to->si_timerid = from->cpt_si_timerid;
+ to->si_overrun = from->cpt_si_overrun;
to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
}
else if (to->si_code == SI_USER)
to->si_pid = from->cpt_si_pid;
to->si_uid = from->cpt_si_uid;
}
- else if (to->si_code == SI_TIMER)
+ else if (to->si_code < 0)
+ {
+ to->si_pid = from->cpt_si_pid;
+ to->si_uid = from->cpt_si_uid;
+ to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
+ }
+ else
+ {
+ switch (to->si_signo)
+ {
+ case SIGCHLD:
+ to->si_pid = from->cpt_si_pid;
+ to->si_uid = from->cpt_si_uid;
+ to->si_status = from->cpt_si_status;
+ to->si_utime = from->cpt_si_utime;
+ to->si_stime = from->cpt_si_stime;
+ break;
+ case SIGILL:
+ case SIGFPE:
+ case SIGSEGV:
+ case SIGBUS:
+ to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
+ break;
+ case SIGPOLL:
+ to->si_band = from->cpt_si_band;
+ to->si_fd = from->cpt_si_fd;
+ break;
+ default:
+ to->si_pid = from->cpt_si_pid;
+ to->si_uid = from->cpt_si_uid;
+ to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
+ break;
+ }
+ }
+}
+
+static void
+compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
+ siginfo_t *from)
+{
+ memset (to, 0, sizeof (*to));
+
+ to->si_signo = from->si_signo;
+ to->si_errno = from->si_errno;
+ to->si_code = from->si_code;
+
+ if (to->si_code == SI_TIMER)
+ {
+ to->cpt_si_timerid = from->si_timerid;
+ to->cpt_si_overrun = from->si_overrun;
+ to->cpt_si_ptr = (intptr_t) from->si_ptr;
+ }
+ else if (to->si_code == SI_USER)
+ {
+ to->cpt_si_pid = from->si_pid;
+ to->cpt_si_uid = from->si_uid;
+ }
+ else if (to->si_code < 0)
+ {
+ to->cpt_si_pid = from->si_pid;
+ to->cpt_si_uid = from->si_uid;
+ to->cpt_si_ptr = (intptr_t) from->si_ptr;
+ }
+ else
+ {
+ switch (to->si_signo)
+ {
+ case SIGCHLD:
+ to->cpt_si_pid = from->si_pid;
+ to->cpt_si_uid = from->si_uid;
+ to->cpt_si_status = from->si_status;
+ to->cpt_si_utime = from->si_utime;
+ to->cpt_si_stime = from->si_stime;
+ break;
+ case SIGILL:
+ case SIGFPE:
+ case SIGSEGV:
+ case SIGBUS:
+ to->cpt_si_addr = (intptr_t) from->si_addr;
+ break;
+ case SIGPOLL:
+ to->cpt_si_band = from->si_band;
+ to->cpt_si_fd = from->si_fd;
+ break;
+ default:
+ to->cpt_si_pid = from->si_pid;
+ to->cpt_si_uid = from->si_uid;
+ to->cpt_si_ptr = (intptr_t) from->si_ptr;
+ break;
+ }
+ }
+}
+
+static void
+siginfo_from_compat_x32_siginfo (siginfo_t *to,
+ compat_x32_siginfo_t *from)
+{
+ memset (to, 0, sizeof (*to));
+
+ to->si_signo = from->si_signo;
+ to->si_errno = from->si_errno;
+ to->si_code = from->si_code;
+
+ if (to->si_code == SI_TIMER)
{
to->si_timerid = from->cpt_si_timerid;
to->si_overrun = from->cpt_si_overrun;
to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
}
+ else if (to->si_code == SI_USER)
+ {
+ to->si_pid = from->cpt_si_pid;
+ to->si_uid = from->cpt_si_uid;
+ }
+ else if (to->si_code < 0)
+ {
+ to->si_pid = from->cpt_si_pid;
+ to->si_uid = from->cpt_si_uid;
+ to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
+ }
else
{
switch (to->si_signo)
}
}
+/* Is this process 64-bit? */
+static int linux_is_elf64;
#endif /* __x86_64__ */
/* Convert a native/host siginfo object, into/from the siginfo in the
INF. */
static int
-x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
+x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
/* Is the inferior 32-bit? If so, then fixup the siginfo object. */
if (register_size (0) == 4)
{
- if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
+ if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
fatal ("unexpected difference in siginfo");
if (direction == 0)
else
siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
+ return 1;
+ }
+ /* No fixup for native x32 GDB. */
+ else if (!linux_is_elf64 && sizeof (void *) == 8)
+ {
+ if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
+ fatal ("unexpected difference in siginfo");
+
+ if (direction == 0)
+ compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
+ native);
+ else
+ siginfo_from_compat_x32_siginfo (native,
+ (struct compat_x32_siginfo *) inf);
+
return 1;
}
#endif
static void
x86_arch_setup (void)
{
-#ifdef __x86_64__
int pid = pid_of (get_thread_lwp (current_inferior));
- char *file = linux_child_pid_to_exec_file (pid);
- int use_64bit = elf_64_file_p (file);
+ unsigned int machine;
+ int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);
- free (file);
+ if (sizeof (void *) == 4)
+ {
+ if (is_elf64 > 0)
+ error (_("Can't debug 64-bit process with 32-bit GDBserver"));
+#ifndef __x86_64__
+ else if (machine == EM_X86_64)
+ error (_("Can't debug x86-64 process with 32-bit GDBserver"));
+#endif
+ }
- if (use_64bit < 0)
+#ifdef __x86_64__
+ if (is_elf64 < 0)
{
/* This can only happen if /proc/<pid>/exe is unreadable,
but "that can't happen" if we've gotten this far.
Fall through and assume this is a 32-bit program. */
}
- else if (use_64bit)
+ else if (machine == EM_X86_64)
{
/* Amd64 doesn't have HAVE_LINUX_USRREGS. */
the_low_target.num_regs = -1;
/* Amd64 has 16 xmm regs. */
num_xmm_registers = 16;
+ linux_is_elf64 = is_elf64;
x86_linux_update_xmltarget ();
return;
}
+
+ linux_is_elf64 = 0;
#endif
/* Ok we have a 32-bit inferior. */
CORE_ADDR lockaddr,
ULONGEST orig_size,
CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
unsigned char *jjump_pad_insn,
ULONGEST *jjump_pad_insn_size,
CORE_ADDR *adjusted_insn_addr,
- CORE_ADDR *adjusted_insn_addr_end)
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
{
unsigned char buf[40];
int i, offset;
+ int64_t loffset;
+
CORE_ADDR buildaddr = *jump_entry;
/* Build the jump pad. */
*adjusted_insn_addr_end = buildaddr;
/* Finally, write a jump back to the program. */
- offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
+
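+ /* The jump displacement is relative to the first byte after the
+ jump instruction itself, hence the "+ sizeof (jump_insn)" terms. */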
+ loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
+ if (loffset > INT_MAX || loffset < INT_MIN)
+ {
+ sprintf (err,
+ "E.Jump back from jump pad too far from tracepoint "
+ "(offset 0x%" PRIx64 " > int32).", loffset);
+ return 1;
+ }
+
+ offset = (int) loffset;
memcpy (buf, jump_insn, sizeof (jump_insn));
memcpy (buf + 1, &offset, 4);
append_insns (&buildaddr, sizeof (jump_insn), buf);
is always done last (by our caller actually), so that we can
install fast tracepoints with threads running. This relies on
the agent's atomic write support. */
- offset = *jump_entry - (tpaddr + sizeof (jump_insn));
+ loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
+ if (loffset > INT_MAX || loffset < INT_MIN)
+ {
+ sprintf (err,
+ "E.Jump pad too far from tracepoint "
+ "(offset 0x%" PRIx64 " > int32).", loffset);
+ return 1;
+ }
+
+ offset = (int) loffset;
+
memcpy (buf, jump_insn, sizeof (jump_insn));
memcpy (buf + 1, &offset, 4);
memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
CORE_ADDR lockaddr,
ULONGEST orig_size,
CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
unsigned char *jjump_pad_insn,
ULONGEST *jjump_pad_insn_size,
CORE_ADDR *adjusted_insn_addr,
- CORE_ADDR *adjusted_insn_addr_end)
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
{
unsigned char buf[0x100];
int i, offset;
buf[i++] = 0x0f; /* pop %fs */
buf[i++] = 0xa1;
buf[i++] = 0x07; /* pop %es */
- buf[i++] = 0x1f; /* pop %de */
+ buf[i++] = 0x1f; /* pop %ds */
buf[i++] = 0x9d; /* popf */
buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
buf[i++] = 0xc4;
is always done last (by our caller actually), so that we can
install fast tracepoints with threads running. This relies on
the agent's atomic write support. */
- offset = *jump_entry - (tpaddr + sizeof (jump_insn));
- memcpy (buf, jump_insn, sizeof (jump_insn));
- memcpy (buf + 1, &offset, 4);
- memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
- *jjump_pad_insn_size = sizeof (jump_insn);
+ if (orig_size == 4)
+ {
+ /* Create a trampoline. */
+ *trampoline_size = sizeof (jump_insn);
+ if (!claim_trampoline_space (*trampoline_size, trampoline))
+ {
+ /* No trampoline space available. */
+ strcpy (err,
+ "E.Cannot allocate trampoline space needed for fast "
+ "tracepoints on 4-byte instructions.");
+ return 1;
+ }
+
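+ /* Two hops: the 4-byte site at TPADDR can only hold a 2-byte
+ displacement, so it jumps to the trampoline, and the trampoline
+ holds a full 5-byte jump to the jump pad. */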
+ offset = *jump_entry - (*trampoline + sizeof (jump_insn));
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
+
+ /* Use a 16-bit relative jump instruction to jump to the trampoline. */
+ offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
+ memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
+ memcpy (buf + 2, &offset, 2);
+ memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
+ *jjump_pad_insn_size = sizeof (small_jump_insn);
+ }
+ else
+ {
+ /* Else use a 32-bit relative jump instruction. */
+ offset = *jump_entry - (tpaddr + sizeof (jump_insn));
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
+ *jjump_pad_insn_size = sizeof (jump_insn);
+ }
/* Return the end address of our pad. */
*jump_entry = buildaddr;
CORE_ADDR lockaddr,
ULONGEST orig_size,
CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
unsigned char *jjump_pad_insn,
ULONGEST *jjump_pad_insn_size,
CORE_ADDR *adjusted_insn_addr,
- CORE_ADDR *adjusted_insn_addr_end)
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
{
#ifdef __x86_64__
if (register_size (0) == 8)
return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
collector, lockaddr,
orig_size, jump_entry,
+ trampoline, trampoline_size,
jjump_pad_insn,
jjump_pad_insn_size,
adjusted_insn_addr,
- adjusted_insn_addr_end);
+ adjusted_insn_addr_end,
+ err);
#endif
return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
collector, lockaddr,
orig_size, jump_entry,
+ trampoline, trampoline_size,
jjump_pad_insn,
jjump_pad_insn_size,
adjusted_insn_addr,
- adjusted_insn_addr_end);
+ adjusted_insn_addr_end,
+ err);
}
-/* This is initialized assuming an amd64 target.
- x86_arch_setup will correct it for i386 or amd64 targets. */
+/* Return the minimum instruction length for fast tracepoints on x86/x86-64
+ architectures. */
-struct linux_target_ops the_low_target =
+static int
+x86_get_min_fast_tracepoint_insn_len (void)
{
- x86_arch_setup,
- -1,
+ static int warned_about_fast_tracepoints = 0;
+
+#ifdef __x86_64__
+ /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
+ used for fast tracepoints. */
+ if (register_size (0) == 8)
+ return 5;
+#endif
+
+ if (agent_loaded_p ())
+ {
+ char errbuf[IPA_BUFSIZ];
+
+ errbuf[0] = '\0';
+
+ /* On x86, if trampolines are available, then 4-byte jump instructions
+ with a 2-byte offset may be used, otherwise 5-byte jump instructions
+ with a 4-byte offset are used instead. */
+ if (have_fast_tracepoint_trampoline_buffer (errbuf))
+ return 4;
+ else
+ {
+ /* GDB has no channel to explain to the user why a shorter fast
+ tracepoint is not possible, but at least make GDBserver
+ mention that something has gone awry. */
+ if (!warned_about_fast_tracepoints)
+ {
+ warning ("4-byte fast tracepoints not available; %s\n", errbuf);
+ warned_about_fast_tracepoints = 1;
+ }
+ return 5;
+ }
+ }
+ else
+ {
+ /* Indicate that the minimum length is currently unknown since the IPA
+ has not loaded yet. */
+ return 0;
+ }
+}
+
+static void
+add_insns (unsigned char *start, int len)
+{
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ if (debug_threads)
+ fprintf (stderr, "Adding %d bytes of insn at %s\n",
+ len, paddress (buildaddr));
+
+ append_insns (&buildaddr, len, start);
+ current_insn_ptr = buildaddr;
+}
+
+/* Our general strategy for emitting code is to avoid specifying raw
+ bytes whenever possible, and instead copy a block of inline asm
+ that is embedded in the function. This is a little messy, because
+ we need to keep the compiler from discarding what looks like dead
+ code, plus suppress various warnings. */
+
+#define EMIT_ASM(NAME, INSNS) \
+ do \
+ { \
+ extern unsigned char start_ ## NAME, end_ ## NAME; \
+ add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
+ __asm__ ("jmp end_" #NAME "\n" \
+ "\t" "start_" #NAME ":" \
+ "\t" INSNS "\n" \
+ "\t" "end_" #NAME ":"); \
+ } while (0)
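+
+/* For example, EMIT_ASM (sketch_nop, "nop") (a hypothetical use)
+ defines the labels start_sketch_nop and end_sketch_nop around a
+ "nop" embedded in this function's own text, and add_insns copies
+ the bytes between the labels into the jump pad; the leading jmp
+ keeps gdbserver itself from executing the block in place. */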
+
+#ifdef __x86_64__
+
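+/* Like EMIT_ASM, but wrap INSNS in .code32/.code64 so that a 64-bit
+ gdbserver assembles them as 32-bit code, suitable for copying into
+ a 32-bit inferior's jump pad. */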
+#define EMIT_ASM32(NAME,INSNS) \
+ do \
+ { \
+ extern unsigned char start_ ## NAME, end_ ## NAME; \
+ add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
+ __asm__ (".code32\n" \
+ "\t" "jmp end_" #NAME "\n" \
+ "\t" "start_" #NAME ":\n" \
+ "\t" INSNS "\n" \
+ "\t" "end_" #NAME ":\n" \
+ ".code64\n"); \
+ } while (0)
+
+#else
+
+#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
+
+#endif
+
+#ifdef __x86_64__
+
+static void
+amd64_emit_prologue (void)
+{
+ EMIT_ASM (amd64_prologue,
+ "pushq %rbp\n\t"
+ "movq %rsp,%rbp\n\t"
+ "sub $0x20,%rsp\n\t"
+ "movq %rdi,-8(%rbp)\n\t"
+ "movq %rsi,-16(%rbp)");
+}
+
+
+static void
+amd64_emit_epilogue (void)
+{
+ EMIT_ASM (amd64_epilogue,
+ "movq -16(%rbp),%rdi\n\t"
+ "movq %rax,(%rdi)\n\t"
+ "xor %rax,%rax\n\t"
+ "leave\n\t"
+ "ret");
+}
+
+static void
+amd64_emit_add (void)
+{
+ EMIT_ASM (amd64_add,
+ "add (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_sub (void)
+{
+ EMIT_ASM (amd64_sub,
+ "sub %rax,(%rsp)\n\t"
+ "pop %rax");
+}
+
+static void
+amd64_emit_mul (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_lsh (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_signed (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_unsigned (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM (amd64_ext_8,
+ "cbtw\n\t"
+ "cwtl\n\t"
+ "cltq");
+ break;
+ case 16:
+ EMIT_ASM (amd64_ext_16,
+ "cwtl\n\t"
+ "cltq");
+ break;
+ case 32:
+ EMIT_ASM (amd64_ext_32,
+ "cltq");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+amd64_emit_log_not (void)
+{
+ EMIT_ASM (amd64_log_not,
+ "test %rax,%rax\n\t"
+ "sete %cl\n\t"
+ "movzbq %cl,%rax");
+}
+
+static void
+amd64_emit_bit_and (void)
+{
+ EMIT_ASM (amd64_and,
+ "and (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_or (void)
+{
+ EMIT_ASM (amd64_or,
+ "or (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_xor (void)
+{
+ EMIT_ASM (amd64_xor,
+ "xor (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_not (void)
+{
+ EMIT_ASM (amd64_bit_not,
+ "xorq $0xffffffffffffffff,%rax");
+}
+
+static void
+amd64_emit_equal (void)
+{
+ EMIT_ASM (amd64_equal,
+ "cmp %rax,(%rsp)\n\t"
+ "je .Lamd64_equal_true\n\t"
+ "xor %rax,%rax\n\t"
+ "jmp .Lamd64_equal_end\n\t"
+ ".Lamd64_equal_true:\n\t"
+ "mov $0x1,%rax\n\t"
+ ".Lamd64_equal_end:\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_signed (void)
+{
+ EMIT_ASM (amd64_less_signed,
+ "cmp %rax,(%rsp)\n\t"
+ "jl .Lamd64_less_signed_true\n\t"
+ "xor %rax,%rax\n\t"
+ "jmp .Lamd64_less_signed_end\n\t"
+ ".Lamd64_less_signed_true:\n\t"
+ "mov $1,%rax\n\t"
+ ".Lamd64_less_signed_end:\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_unsigned (void)
+{
+ EMIT_ASM (amd64_less_unsigned,
+ "cmp %rax,(%rsp)\n\t"
+ "jb .Lamd64_less_unsigned_true\n\t"
+ "xor %rax,%rax\n\t"
+ "jmp .Lamd64_less_unsigned_end\n\t"
+ ".Lamd64_less_unsigned_true:\n\t"
+ "mov $1,%rax\n\t"
+ ".Lamd64_less_unsigned_end:\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_ref (int size)
+{
+ switch (size)
+ {
+ case 1:
+ EMIT_ASM (amd64_ref1,
+ "movb (%rax),%al");
+ break;
+ case 2:
+ EMIT_ASM (amd64_ref2,
+ "movw (%rax),%ax");
+ break;
+ case 4:
+ EMIT_ASM (amd64_ref4,
+ "movl (%rax),%eax");
+ break;
+ case 8:
+ EMIT_ASM (amd64_ref8,
+ "movq (%rax),%rax");
+ break;
+ }
+}
+
+static void
+amd64_emit_if_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_if_goto,
+ "mov %rax,%rcx\n\t"
+ "pop %rax\n\t"
+ "cmp $0,%rcx\n\t"
+ ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+ if (offset_p)
+ *offset_p = 10;
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+amd64_emit_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_goto,
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+ if (offset_p)
+ *offset_p = 1;
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+ int diff = (to - (from + size));
+ unsigned char buf[sizeof (int)];
+
+ if (size != 4)
+ {
+ emit_error = 1;
+ return;
+ }
+
+ memcpy (buf, &diff, sizeof (int));
+ write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+amd64_emit_const (LONGEST num)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
+ memcpy (&buf[i], &num, sizeof (num));
+ i += 8;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_call (CORE_ADDR fn)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+ LONGEST offset64;
+
+ /* The destination function, being in the shared library, may be
+ more than 31 bits away from the compiled code pad. */
+
+ buildaddr = current_insn_ptr;
+
+ offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
+
+ i = 0;
+
+ if (offset64 > INT_MAX || offset64 < INT_MIN)
+ {
+ /* The offset is too large for a direct call, so load the
+ address into a register and call through it. The encoding
+ below uses %rdx, which is call-clobbered and carries no
+ argument at these call sites, so we don't have to
+ push/pop it. */
+ buf[i++] = 0x48; /* movabs $fn,%rdx */
+ buf[i++] = 0xba;
+ memcpy (buf + i, &fn, 8);
+ i += 8;
+ buf[i++] = 0xff; /* callq *%rdx */
+ buf[i++] = 0xd2;
+ }
+ else
+ {
+ int offset32 = offset64; /* We know we can't overflow here. */
+
+ buf[i++] = 0xe8; /* call <reladdr> */
+ memcpy (buf + i, &offset32, 4);
+ i += 4;
+ }
+
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_reg (int reg)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ /* Assume raw_regs is still in %rdi. */
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xbe; /* mov $<n>,%esi */
+ memcpy (&buf[i], &reg, sizeof (reg));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ amd64_emit_call (get_raw_reg_func_addr ());
+}
+
+static void
+amd64_emit_pop (void)
+{
+ EMIT_ASM (amd64_pop,
+ "pop %rax");
+}
+
+static void
+amd64_emit_stack_flush (void)
+{
+ EMIT_ASM (amd64_stack_flush,
+ "push %rax");
+}
+
+static void
+amd64_emit_zero_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM (amd64_zero_ext_8,
+ "and $0xff,%rax");
+ break;
+ case 16:
+ EMIT_ASM (amd64_zero_ext_16,
+ "and $0xffff,%rax");
+ break;
+ case 32:
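+ /* An immediate "and $0xffffffff,%rax" would be sign-extended to
+ 64 bits and clear nothing, so load the mask into %rcx instead. */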
+ EMIT_ASM (amd64_zero_ext_32,
+ "mov $0xffffffff,%rcx\n\t"
+ "and %rcx,%rax");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+amd64_emit_swap (void)
+{
+ EMIT_ASM (amd64_swap,
+ "mov %rax,%rcx\n\t"
+ "pop %rax\n\t"
+ "push %rcx");
+}
+
+static void
+amd64_emit_stack_adjust (int n)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
+ buf[i++] = 0x8d;
+ buf[i++] = 0x64;
+ buf[i++] = 0x24;
+ /* This only handles small adjustments (n * 8 must fit in a signed
+ 8-bit displacement), but we don't expect any more. */
+ buf[i++] = n * 8;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'. */
+
+static void
+amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xbf; /* movl $<n>,%edi */
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ amd64_emit_call (fn);
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'. */
+
+static void
+amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xbf; /* movl $<n>,%edi */
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ EMIT_ASM (amd64_void_call_2_a,
+ /* Save away a copy of the stack top. */
+ "push %rax\n\t"
+ /* Also pass top as the second argument. */
+ "mov %rax,%rsi");
+ amd64_emit_call (fn);
+ EMIT_ASM (amd64_void_call_2_b,
+ /* Restore the stack top, %rax may have been trashed. */
+ "pop %rax");
+}
+
+void
+amd64_emit_eq_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_eq,
+ "cmp %rax,(%rsp)\n\t"
+ "jne .Lamd64_eq_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_eq_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_ne_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_ne,
+ "cmp %rax,(%rsp)\n\t"
+ "je .Lamd64_ne_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_ne_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_lt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_lt,
+ "cmp %rax,(%rsp)\n\t"
+ "jnl .Lamd64_lt_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_lt_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_le_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_le,
+ "cmp %rax,(%rsp)\n\t"
+ "jnle .Lamd64_le_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_le_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_gt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_gt,
+ "cmp %rax,(%rsp)\n\t"
+ "jng .Lamd64_gt_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_gt_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_ge_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_ge,
+ "cmp %rax,(%rsp)\n\t"
+ "jnge .Lamd64_ge_fallthru\n\t"
+ ".Lamd64_ge_jump:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_ge_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+struct emit_ops amd64_emit_ops =
+ {
+ amd64_emit_prologue,
+ amd64_emit_epilogue,
+ amd64_emit_add,
+ amd64_emit_sub,
+ amd64_emit_mul,
+ amd64_emit_lsh,
+ amd64_emit_rsh_signed,
+ amd64_emit_rsh_unsigned,
+ amd64_emit_ext,
+ amd64_emit_log_not,
+ amd64_emit_bit_and,
+ amd64_emit_bit_or,
+ amd64_emit_bit_xor,
+ amd64_emit_bit_not,
+ amd64_emit_equal,
+ amd64_emit_less_signed,
+ amd64_emit_less_unsigned,
+ amd64_emit_ref,
+ amd64_emit_if_goto,
+ amd64_emit_goto,
+ amd64_write_goto_address,
+ amd64_emit_const,
+ amd64_emit_call,
+ amd64_emit_reg,
+ amd64_emit_pop,
+ amd64_emit_stack_flush,
+ amd64_emit_zero_ext,
+ amd64_emit_swap,
+ amd64_emit_stack_adjust,
+ amd64_emit_int_call_1,
+ amd64_emit_void_call_2,
+ amd64_emit_eq_goto,
+ amd64_emit_ne_goto,
+ amd64_emit_lt_goto,
+ amd64_emit_le_goto,
+ amd64_emit_gt_goto,
+ amd64_emit_ge_goto
+ };
+
+#endif /* __x86_64__ */
+
+static void
+i386_emit_prologue (void)
+{
+ EMIT_ASM32 (i386_prologue,
+ "push %ebp\n\t"
+ "mov %esp,%ebp\n\t"
+ "push %ebx");
+ /* At this point, the raw regs base address is at 8(%ebp), and the
+ value pointer is at 12(%ebp). */
+}
+
+static void
+i386_emit_epilogue (void)
+{
+ EMIT_ASM32 (i386_epilogue,
+ "mov 12(%ebp),%ecx\n\t"
+ "mov %eax,(%ecx)\n\t"
+ "mov %ebx,0x4(%ecx)\n\t"
+ "xor %eax,%eax\n\t"
+ "pop %ebx\n\t"
+ "pop %ebp\n\t"
+ "ret");
+}
+
+static void
+i386_emit_add (void)
+{
+ EMIT_ASM32 (i386_add,
+ "add (%esp),%eax\n\t"
+ "adc 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_sub (void)
+{
+ EMIT_ASM32 (i386_sub,
+ "subl %eax,(%esp)\n\t"
+ "sbbl %ebx,4(%esp)\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t");
+}
+
+static void
+i386_emit_mul (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_lsh (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_rsh_signed (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_rsh_unsigned (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM32 (i386_ext_8,
+ "cbtw\n\t"
+ "cwtl\n\t"
+ "movl %eax,%ebx\n\t"
+ "sarl $31,%ebx");
+ break;
+ case 16:
+ EMIT_ASM32 (i386_ext_16,
+ "cwtl\n\t"
+ "movl %eax,%ebx\n\t"
+ "sarl $31,%ebx");
+ break;
+ case 32:
+ EMIT_ASM32 (i386_ext_32,
+ "movl %eax,%ebx\n\t"
+ "sarl $31,%ebx");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+i386_emit_log_not (void)
+{
+ EMIT_ASM32 (i386_log_not,
+ "or %ebx,%eax\n\t"
+ "test %eax,%eax\n\t"
+ "sete %cl\n\t"
+ "xor %ebx,%ebx\n\t"
+ "movzbl %cl,%eax");
+}
+
+static void
+i386_emit_bit_and (void)
+{
+ EMIT_ASM32 (i386_and,
+ "and (%esp),%eax\n\t"
+ "and 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_or (void)
+{
+ EMIT_ASM32 (i386_or,
+ "or (%esp),%eax\n\t"
+ "or 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_xor (void)
+{
+ EMIT_ASM32 (i386_xor,
+ "xor (%esp),%eax\n\t"
+ "xor 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_not (void)
+{
+ EMIT_ASM32 (i386_bit_not,
+ "xor $0xffffffff,%eax\n\t"
+ "xor $0xffffffff,%ebx\n\t");
+}
+
+static void
+i386_emit_equal (void)
+{
+ EMIT_ASM32 (i386_equal,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jne .Li386_equal_false\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "je .Li386_equal_true\n\t"
+ ".Li386_equal_false:\n\t"
+ "xor %eax,%eax\n\t"
+ "jmp .Li386_equal_end\n\t"
+ ".Li386_equal_true:\n\t"
+ "mov $1,%eax\n\t"
+ ".Li386_equal_end:\n\t"
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_signed (void)
+{
+ EMIT_ASM32 (i386_less_signed,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jl .Li386_less_signed_true\n\t"
+ "jne .Li386_less_signed_false\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jl .Li386_less_signed_true\n\t"
+ ".Li386_less_signed_false:\n\t"
+ "xor %eax,%eax\n\t"
+ "jmp .Li386_less_signed_end\n\t"
+ ".Li386_less_signed_true:\n\t"
+ "mov $1,%eax\n\t"
+ ".Li386_less_signed_end:\n\t"
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_unsigned (void)
+{
+ EMIT_ASM32 (i386_less_unsigned,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jb .Li386_less_unsigned_true\n\t"
+ "jne .Li386_less_unsigned_false\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jb .Li386_less_unsigned_true\n\t"
+ ".Li386_less_unsigned_false:\n\t"
+ "xor %eax,%eax\n\t"
+ "jmp .Li386_less_unsigned_end\n\t"
+ ".Li386_less_unsigned_true:\n\t"
+ "mov $1,%eax\n\t"
+ ".Li386_less_unsigned_end:\n\t"
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_ref (int size)
+{
+ switch (size)
+ {
+ case 1:
+ EMIT_ASM32 (i386_ref1,
+ "movb (%eax),%al");
+ break;
+ case 2:
+ EMIT_ASM32 (i386_ref2,
+ "movw (%eax),%ax");
+ break;
+ case 4:
+ EMIT_ASM32 (i386_ref4,
+ "movl (%eax),%eax");
+ break;
+ case 8:
+ EMIT_ASM32 (i386_ref8,
+ "movl 4(%eax),%ebx\n\t"
+ "movl (%eax),%eax");
+ break;
+ }
+}
+
+static void
+i386_emit_if_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (i386_if_goto,
+ "mov %eax,%ecx\n\t"
+ "or %ebx,%ecx\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ "cmpl $0,%ecx\n\t"
+ /* Don't trust the assembler to choose the right jump */
+ ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+
+ if (offset_p)
+ *offset_p = 11; /* be sure that this matches the sequence above */
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+i386_emit_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (i386_goto,
+ /* Don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+ if (offset_p)
+ *offset_p = 1;
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+ int diff = (to - (from + size));
+ unsigned char buf[sizeof (int)];
+
+ /* We're only doing 4-byte sizes at the moment. */
+ if (size != 4)
+ {
+ emit_error = 1;
+ return;
+ }
+
+ memcpy (buf, &diff, sizeof (int));
+ write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+i386_emit_const (LONGEST num)
+{
+ unsigned char buf[16];
+ int i, hi, lo;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0xb8; /* mov $<n>,%eax */
+ lo = num & 0xffffffff;
+ memcpy (&buf[i], &lo, sizeof (lo));
+ i += 4;
+ hi = ((num >> 32) & 0xffffffff);
+ if (hi)
+ {
+ buf[i++] = 0xbb; /* mov $<n>,%ebx */
+ memcpy (&buf[i], &hi, sizeof (hi));
+ i += 4;
+ }
+ else
+ {
+ buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
+ }
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+i386_emit_call (CORE_ADDR fn)
+{
+ unsigned char buf[16];
+ int i, offset;
+ CORE_ADDR buildaddr;
+
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xe8; /* call <reladdr> */
+ offset = ((int) fn) - (buildaddr + 5);
+ memcpy (buf + 1, &offset, 4);
+ append_insns (&buildaddr, 5, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+i386_emit_reg (int reg)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ EMIT_ASM32 (i386_reg_a,
+ "sub $0x8,%esp");
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xb8; /* mov $<n>,%eax */
+ memcpy (&buf[i], &reg, sizeof (reg));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ EMIT_ASM32 (i386_reg_b,
+ "mov %eax,4(%esp)\n\t"
+ "mov 8(%ebp),%eax\n\t"
+ "mov %eax,(%esp)");
+ i386_emit_call (get_raw_reg_func_addr ());
+ EMIT_ASM32 (i386_reg_c,
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_pop (void)
+{
+ EMIT_ASM32 (i386_pop,
+ "pop %eax\n\t"
+ "pop %ebx");
+}
+
+static void
+i386_emit_stack_flush (void)
+{
+ EMIT_ASM32 (i386_stack_flush,
+ "push %ebx\n\t"
+ "push %eax");
+}
+
+static void
+i386_emit_zero_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM32 (i386_zero_ext_8,
+ "and $0xff,%eax\n\t"
+ "xor %ebx,%ebx");
+ break;
+ case 16:
+ EMIT_ASM32 (i386_zero_ext_16,
+ "and $0xffff,%eax\n\t"
+ "xor %ebx,%ebx");
+ break;
+ case 32:
+ EMIT_ASM32 (i386_zero_ext_32,
+ "xor %ebx,%ebx");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+i386_emit_swap (void)
+{
+ EMIT_ASM32 (i386_swap,
+ "mov %eax,%ecx\n\t"
+ "mov %ebx,%edx\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ "push %edx\n\t"
+ "push %ecx");
+}
+
+static void
+i386_emit_stack_adjust (int n)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
+ buf[i++] = 0x64;
+ buf[i++] = 0x24;
+ buf[i++] = n * 8;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'. */
+
+static void
+i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ EMIT_ASM32 (i386_int_call_1_a,
+ /* Reserve a bit of stack space. */
+ "sub $0x8,%esp");
+ /* Put the one argument on the stack. */
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
+ buf[i++] = 0x04;
+ buf[i++] = 0x24;
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ i386_emit_call (fn);
+ EMIT_ASM32 (i386_int_call_1_c,
+ "mov %edx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'. */
+
+static void
+i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ EMIT_ASM32 (i386_void_call_2_a,
+ /* Preserve %eax only; we don't have to worry about %ebx. */
+ "push %eax\n\t"
+ /* Reserve a bit of stack space for arguments. */
+ "sub $0x10,%esp\n\t"
+ /* Copy "top" to the second argument position. (Note that
+ we can't assume function won't scribble on its
+ arguments, so don't try to restore from this.) */
+ "mov %eax,4(%esp)\n\t"
+ "mov %ebx,8(%esp)");
+ /* Put the first argument on the stack. */
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
+ buf[i++] = 0x04;
+ buf[i++] = 0x24;
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ i386_emit_call (fn);
+ EMIT_ASM32 (i386_void_call_2_b,
+ "lea 0x10(%esp),%esp\n\t"
+ /* Restore original stack top. */
+ "pop %eax");
+}
+
+
+void
+i386_emit_eq_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (eq,
+ /* Check the low half first; it is more likely to be the decider. */
+ "cmpl %eax,(%esp)\n\t"
+ "jne .Leq_fallthru\n\t"
+ "cmpl %ebx,4(%esp)\n\t"
+ "jne .Leq_fallthru\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Leq_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 18;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_ne_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (ne,
+ /* Check the low half first; it is more likely to be the decider. */
+ "cmpl %eax,(%esp)\n\t"
+ "jne .Lne_jump\n\t"
+ "cmpl %ebx,4(%esp)\n\t"
+ "je .Lne_fallthru\n\t"
+ ".Lne_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lne_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 18;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_lt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (lt,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jl .Llt_jump\n\t"
+ "jne .Llt_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jnl .Llt_fallthru\n\t"
+ ".Llt_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Llt_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_le_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (le,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jle .Lle_jump\n\t"
+ "jne .Lle_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jnle .Lle_fallthru\n\t"
+ ".Lle_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lle_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_gt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (gt,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jg .Lgt_jump\n\t"
+ "jne .Lgt_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jng .Lgt_fallthru\n\t"
+ ".Lgt_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lgt_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_ge_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (ge,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jge .Lge_jump\n\t"
+ "jne .Lge_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jnge .Lge_fallthru\n\t"
+ ".Lge_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lge_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+struct emit_ops i386_emit_ops =
+ {
+ i386_emit_prologue,
+ i386_emit_epilogue,
+ i386_emit_add,
+ i386_emit_sub,
+ i386_emit_mul,
+ i386_emit_lsh,
+ i386_emit_rsh_signed,
+ i386_emit_rsh_unsigned,
+ i386_emit_ext,
+ i386_emit_log_not,
+ i386_emit_bit_and,
+ i386_emit_bit_or,
+ i386_emit_bit_xor,
+ i386_emit_bit_not,
+ i386_emit_equal,
+ i386_emit_less_signed,
+ i386_emit_less_unsigned,
+ i386_emit_ref,
+ i386_emit_if_goto,
+ i386_emit_goto,
+ i386_write_goto_address,
+ i386_emit_const,
+ i386_emit_call,
+ i386_emit_reg,
+ i386_emit_pop,
+ i386_emit_stack_flush,
+ i386_emit_zero_ext,
+ i386_emit_swap,
+ i386_emit_stack_adjust,
+ i386_emit_int_call_1,
+ i386_emit_void_call_2,
+ i386_emit_eq_goto,
+ i386_emit_ne_goto,
+ i386_emit_lt_goto,
+ i386_emit_le_goto,
+ i386_emit_gt_goto,
+ i386_emit_ge_goto
+ };
+
+
+static struct emit_ops *
+x86_emit_ops (void)
+{
+#ifdef __x86_64__
+ int use_64bit = register_size (0) == 8;
+
+ if (use_64bit)
+ return &amd64_emit_ops;
+ else
+#endif
+ return &i386_emit_ops;
+}
+
+/* This is initialized assuming an amd64 target.
+ x86_arch_setup will correct it for i386 or amd64 targets. */
+
+struct linux_target_ops the_low_target =
+{
+ x86_arch_setup,
+ -1,
+ NULL,
NULL,
NULL,
NULL,
+ NULL, /* fetch_register */
x86_get_pc,
x86_set_pc,
x86_breakpoint,
x86_linux_process_qsupported,
x86_supports_tracepoints,
x86_get_thread_area,
- x86_install_fast_tracepoint_jump_pad
+ x86_install_fast_tracepoint_jump_pad,
+ x86_emit_ops,
+ x86_get_min_fast_tracepoint_insn_len,
};