diff --git a/gdb/gdbserver/linux-x86-low.c b/gdb/gdbserver/linux-x86-low.c
index 2f17f45927..f09871ac36 100644
--- a/gdb/gdbserver/linux-x86-low.c
+++ b/gdb/gdbserver/linux-x86-low.c
@@ -1,7 +1,6 @@
 /* GNU/Linux/x86-64 specific low level interface, for the remote server
    for GDB.
-   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009
-   Free Software Foundation, Inc.
+   Copyright (C) 2002-2017 Free Software Foundation, Inc.
 
    This file is part of GDB.
 
@@ -18,27 +17,63 @@
    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
 
-#include <stddef.h>
-#include <signal.h>
 #include "server.h"
+#include <signal.h>
+#include <limits.h>
+#include <inttypes.h>
 #include "linux-low.h"
 #include "i387-fp.h"
-#include "i386-low.h"
+#include "x86-low.h"
+#include "x86-xstate.h"
+#include "nat/gdb_ptrace.h"
+
+#ifdef __x86_64__
+#include "nat/amd64-linux-siginfo.h"
+#endif
 
 #include "gdb_proc_service.h"
+/* Don't include elf/common.h if linux/elf.h got included by
+   gdb_proc_service.h.  */
+#ifndef ELFMAG0
+#include "elf/common.h"
+#endif
+
+#include "agent.h"
+#include "tdesc.h"
+#include "tracepoint.h"
+#include "ax.h"
+#include "nat/linux-nat.h"
+#include "nat/x86-linux.h"
+#include "nat/x86-linux-dregs.h"
+#include "linux-x86-tdesc.h"
+
+#ifdef __x86_64__
+static struct target_desc *tdesc_amd64_linux_no_xml;
+#endif
+static struct target_desc *tdesc_i386_linux_no_xml;
+
+
+static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
+static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
 
-/* NOTE: gdb_proc_service.h may include linux/elf.h.
-   We need Elf32_Phdr.  If we don't get linux/elf.h we could include
-   elf.h like linux-ppc-low.c does.  */
+/* Backward compatibility for gdb without XML support.  */
 
-/* Defined in auto-generated file reg-i386-linux.c.  */
-void init_registers_i386_linux (void);
-/* Defined in auto-generated file reg-x86-64-linux.c.  */
-void init_registers_x86_64_linux (void);
+static const char *xmltarget_i386_linux_no_xml = "@<target>\
+<architecture>i386</architecture>\
+<osabi>GNU/Linux</osabi>\
+</target>";
+
+#ifdef __x86_64__
+static const char *xmltarget_amd64_linux_no_xml = "@<target>\
+<architecture>i386:x86-64</architecture>\
+<osabi>GNU/Linux</osabi>\
+</target>";
+#endif
 
 #include <sys/reg.h>
 #include <sys/procfs.h>
-#include <sys/ptrace.h>
+#include "nat/gdb_ptrace.h"
+#include <sys/uio.h>
 
 #ifndef PTRACE_GET_THREAD_AREA
 #define PTRACE_GET_THREAD_AREA 25
@@ -62,15 +97,7 @@ void init_registers_x86_64_linux (void);
 
 struct arch_process_info
 {
-  struct i386_debug_reg_state debug_reg_state;
-};
-
-/* Per-thread arch-specific data we want to keep.  */
-
-struct arch_lwp_info
-{
-  /* Non-zero if our copy differs from what's recorded in the thread.  */
-  int debug_registers_changed;
+  struct x86_debug_reg_state debug_reg_state;
 };
 
 #ifdef __x86_64__
@@ -90,6 +117,7 @@ static /*const*/ int i386_regmap[] =
 
 /* So code below doesn't have to care, i386 or amd64.  */
 #define ORIG_EAX ORIG_RAX
+#define REGSIZE 8
 
 static const int x86_64_regmap[] =
 {
@@ -102,11 +130,30 @@ static const int x86_64_regmap[] =
   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
   -1, -1, -1, -1, -1, -1, -1, -1,
   -1, -1, -1, -1, -1, -1, -1, -1,
-  -1, -1, -1, -1, -1, -1, -1, -1, -1,
-  ORIG_RAX * 8
+  -1,
+  -1, -1, -1, -1, -1, -1, -1, -1,
+  ORIG_RAX * 8,
+#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
+  21 * 8,  22 * 8,
+#else
+  -1, -1,
+#endif
+  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
+  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
+  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
+  -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
+  -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
+  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
+  -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1,
+  -1				/* pkru  */
 };
 
 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
+#define X86_64_USER_REGS (GS + 1)
 
 #else /* ! __x86_64__ */
 
@@ -122,16 +169,34 @@ static /*const*/ int i386_regmap[] =
 
 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
 
+#define REGSIZE 4
+
+#endif
+
+#ifdef __x86_64__
+
+/* Returns true if the current inferior belongs to a x86-64 process,
+   per the tdesc.  */
+
+static int
+is_64bit_tdesc (void)
+{
+  struct regcache *regcache = get_thread_regcache (current_thread, 0);
+
+  return register_size (regcache->tdesc, 0) == 8;
+}
+
 #endif
 
+
 /* Called by libthread_db.  */
 
 ps_err_e
-ps_get_thread_area (const struct ps_prochandle *ph,
+ps_get_thread_area (struct ps_prochandle *ph,
 		    lwpid_t lwpid, int idx, void **base)
 {
 #ifdef __x86_64__
-  int use_64bit = register_size (0) == 8;
+  int use_64bit = is_64bit_tdesc ();
 
   if (use_64bit)
     {
@@ -159,167 +224,276 @@ ps_get_thread_area (const struct ps_prochandle *ph,
 		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
       return PS_ERR;
 
-    *(int *)base = desc[1];
+    /* Ensure we properly extend the value to 64-bits for x86_64.  */
+    *base = (void *) (uintptr_t) desc[1];
     return PS_OK;
   }
 }
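(Editorial aside, not part of the patch: the 32-bit fallback path of the thread-area lookups here recovers the TLS slot from the %gs register. A segment selector's low three bits hold the RPL (2 bits) and the table indicator (1 bit), so shifting the selector right by 3 — the `reg_thread_area' constant used below — yields the GDT entry index that PTRACE_GET_THREAD_AREA expects. A minimal standalone sketch, with a hypothetical helper name:)

    #include <stdint.h>

    /* Illustration only: convert an x86 segment selector (e.g. the
       saved %gs value) to the descriptor-table index that
       PTRACE_GET_THREAD_AREA takes as its address argument.  */
    static inline unsigned int
    selector_to_gdt_index (uint16_t selector)
    {
      return selector >> 3;	/* drop RPL (2 bits) and TI (1 bit) */
    }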
+
+/* Get the thread area address.  This is used to recognize which
+   thread is which when tracing with the in-process agent library.  We
+   don't read anything from the address, and treat it as opaque; it's
+   the address itself that we assume is unique per-thread.  */
+
+static int
+x86_get_thread_area (int lwpid, CORE_ADDR *addr)
+{
+#ifdef __x86_64__
+  int use_64bit = is_64bit_tdesc ();
+
+  if (use_64bit)
+    {
+      void *base;
+      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
+	{
+	  *addr = (CORE_ADDR) (uintptr_t) base;
+	  return 0;
+	}
+
+      return -1;
+    }
+#endif
+
+  {
+    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
+    struct thread_info *thr = get_lwp_thread (lwp);
+    struct regcache *regcache = get_thread_regcache (thr, 1);
+    unsigned int desc[4];
+    ULONGEST gs = 0;
+    const int reg_thread_area = 3; /* bits to scale down register value.  */
+    int idx;
+
+    collect_register_by_name (regcache, "gs", &gs);
+
+    idx = gs >> reg_thread_area;
+
+    if (ptrace (PTRACE_GET_THREAD_AREA,
+		lwpid_of (thr),
+		(void *) (long) idx, (unsigned long) &desc) < 0)
+      return -1;
+
+    *addr = desc[1];
+    return 0;
+  }
+}
+
+
 static int
-i386_cannot_store_register (int regno)
+x86_cannot_store_register (int regno)
 {
+#ifdef __x86_64__
+  if (is_64bit_tdesc ())
+    return 0;
+#endif
+
   return regno >= I386_NUM_REGS;
 }
 
 static int
-i386_cannot_fetch_register (int regno)
+x86_cannot_fetch_register (int regno)
 {
+#ifdef __x86_64__
+  if (is_64bit_tdesc ())
+    return 0;
+#endif
+
   return regno >= I386_NUM_REGS;
 }
 
 static void
-x86_fill_gregset (void *buf)
+x86_fill_gregset (struct regcache *regcache, void *buf)
 {
   int i;
 
 #ifdef __x86_64__
-  if (register_size (0) == 8)
+  if (register_size (regcache->tdesc, 0) == 8)
     {
       for (i = 0; i < X86_64_NUM_REGS; i++)
 	if (x86_64_regmap[i] != -1)
-	  collect_register (i, ((char *) buf) + x86_64_regmap[i]);
+	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
+
+#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
+      {
+	unsigned long base;
+	int lwpid = lwpid_of (current_thread);
+
+	collect_register_by_name (regcache, "fs_base", &base);
+	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
+
+	collect_register_by_name (regcache, "gs_base", &base);
+	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
+      }
+#endif
+
       return;
     }
+
+  /* 32-bit inferior registers need to be zero-extended.
+     Callers would read uninitialized memory otherwise.  */
+  memset (buf, 0x00, X86_64_USER_REGS * 8);
 #endif
 
   for (i = 0; i < I386_NUM_REGS; i++)
-    collect_register (i, ((char *) buf) + i386_regmap[i]);
+    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
 
-  collect_register_by_name ("orig_eax", ((char *) buf) + ORIG_EAX * 4);
+  collect_register_by_name (regcache, "orig_eax",
+			    ((char *) buf) + ORIG_EAX * REGSIZE);
 }
 
 static void
-x86_store_gregset (const void *buf)
+x86_store_gregset (struct regcache *regcache, const void *buf)
 {
   int i;
 
 #ifdef __x86_64__
-  if (register_size (0) == 8)
+  if (register_size (regcache->tdesc, 0) == 8)
     {
       for (i = 0; i < X86_64_NUM_REGS; i++)
 	if (x86_64_regmap[i] != -1)
-	  supply_register (i, ((char *) buf) + x86_64_regmap[i]);
+	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
+
+#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
+      {
+	unsigned long base;
+	int lwpid = lwpid_of (current_thread);
+
+	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
+	  supply_register_by_name (regcache, "fs_base", &base);
+
+	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
+	  supply_register_by_name (regcache, "gs_base", &base);
+      }
+#endif
       return;
     }
 #endif
 
   for (i = 0; i < I386_NUM_REGS; i++)
-    supply_register (i, ((char *) buf) + i386_regmap[i]);
+    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
 
-  supply_register_by_name ("orig_eax", ((char *) buf) + ORIG_EAX * 4);
+  supply_register_by_name (regcache, "orig_eax",
+			   ((char *) buf) + ORIG_EAX * REGSIZE);
 }
 
 static void
-x86_fill_fpregset (void *buf)
+x86_fill_fpregset (struct regcache *regcache, void *buf)
 {
 #ifdef __x86_64__
-  i387_cache_to_fxsave (buf);
+  i387_cache_to_fxsave (regcache, buf);
 #else
-  i387_cache_to_fsave (buf);
+  i387_cache_to_fsave (regcache, buf);
 #endif
 }
 
 static void
-x86_store_fpregset (const void *buf)
+x86_store_fpregset (struct regcache *regcache, const void *buf)
 {
 #ifdef __x86_64__
-  i387_fxsave_to_cache (buf);
+  i387_fxsave_to_cache (regcache, buf);
 #else
-  i387_fsave_to_cache (buf);
+  i387_fsave_to_cache (regcache, buf);
 #endif
 }
 
 #ifndef __x86_64__
 
 static void
-x86_fill_fpxregset (void *buf)
+x86_fill_fpxregset (struct regcache *regcache, void *buf)
 {
-  i387_cache_to_fxsave (buf);
+  i387_cache_to_fxsave (regcache, buf);
 }
 
 static void
-x86_store_fpxregset (const void *buf)
+x86_store_fpxregset (struct regcache *regcache, const void *buf)
 {
-  i387_fxsave_to_cache (buf);
+  i387_fxsave_to_cache (regcache, buf);
 }
 
 #endif
 
+static void
+x86_fill_xstateregset (struct regcache *regcache, void *buf)
+{
+  i387_cache_to_xsave (regcache, buf);
+}
+
+static void
+x86_store_xstateregset (struct regcache *regcache, const void *buf)
+{
+  i387_xsave_to_cache (regcache, buf);
+}
+
 /* ??? The non-biarch i386 case stores all the i387 regs twice.
    Once in i387_.*fsave.* and once in i387_.*fxsave.*.
    This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
    doesn't work.  IWBN to avoid the duplication in the case where it
    does work.  Maybe the arch_setup routine could check whether it works
-   and update target_regsets accordingly, maybe by moving target_regsets
-   to linux_target_ops and set the right one there, rather than having to
-   modify the target_regsets global.  */
+   and update the supported regsets accordingly.  */
 
-struct regset_info target_regsets[] =
+static struct regset_info x86_regsets[] =
 {
 #ifdef HAVE_PTRACE_GETREGS
-  { PTRACE_GETREGS, PTRACE_SETREGS, sizeof (elf_gregset_t),
+  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
     GENERAL_REGS,
     x86_fill_gregset, x86_store_gregset },
+  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
+    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
 # ifndef __x86_64__
 #  ifdef HAVE_PTRACE_GETFPXREGS
-  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, sizeof (elf_fpxregset_t),
+  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
     EXTENDED_REGS,
     x86_fill_fpxregset, x86_store_fpxregset },
 #  endif
 # endif
-  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, sizeof (elf_fpregset_t),
+  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
     FP_REGS,
     x86_fill_fpregset, x86_store_fpregset },
 #endif /* HAVE_PTRACE_GETREGS */
-  { 0, 0, -1, -1, NULL, NULL }
+  NULL_REGSET
 };
 
 static CORE_ADDR
-x86_get_pc (void)
+x86_get_pc (struct regcache *regcache)
 {
-  int use_64bit = register_size (0) == 8;
+  int use_64bit = register_size (regcache->tdesc, 0) == 8;
 
   if (use_64bit)
     {
-      unsigned long pc;
-      collect_register_by_name ("rip", &pc);
+      uint64_t pc;
+
+      collect_register_by_name (regcache, "rip", &pc);
       return (CORE_ADDR) pc;
     }
   else
    {
-      unsigned int pc;
-      collect_register_by_name ("eip", &pc);
+      uint32_t pc;
+
+      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
 }
 
 static void
-x86_set_pc (CORE_ADDR pc)
+x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
 {
-  int use_64bit = register_size (0) == 8;
+  int use_64bit = register_size (regcache->tdesc, 0) == 8;
 
   if (use_64bit)
     {
-      unsigned long newpc = pc;
-      supply_register_by_name ("rip", &newpc);
+      uint64_t newpc = pc;
+
+      supply_register_by_name (regcache, "rip", &newpc);
     }
   else
     {
-      unsigned int newpc = pc;
-      supply_register_by_name ("eip", &newpc);
+      uint32_t newpc = pc;
+
+      supply_register_by_name (regcache, "eip", &newpc);
    }
 }
 
-static const unsigned char x86_breakpoint[] = { 0xCC };
+static const gdb_byte x86_breakpoint[] = { 0xCC };
 #define x86_breakpoint_len 1
 
 static int
@@ -327,117 +501,61 @@ x86_breakpoint_at (CORE_ADDR pc)
 {
   unsigned char c;
 
-  read_inferior_memory (pc, &c, 1);
+  (*the_target->read_memory) (pc, &c, 1);
   if (c == 0xCC)
     return 1;
 
   return 0;
 }
 
-/* Support for debug registers.  */
-
-static unsigned long
-x86_linux_dr_get (ptid_t ptid, int regnum)
-{
-  int tid;
-  unsigned long value;
-
-  tid = ptid_get_lwp (ptid);
-
-  errno = 0;
-  value = ptrace (PTRACE_PEEKUSER, tid,
-		  offsetof (struct user, u_debugreg[regnum]), 0);
-  if (errno != 0)
-    error ("Couldn't read debug register");
-
-  return value;
-}
-
-static void
-x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
-{
-  int tid;
-
-  tid = ptid_get_lwp (ptid);
-
-  errno = 0;
-  ptrace (PTRACE_POKEUSER, tid,
-	  offsetof (struct user, u_debugreg[regnum]), value);
-  if (errno != 0)
-    error ("Couldn't write debug register");
-}
-
-/* Update the inferior's debug register REGNUM from STATE.  */
-
-void
-i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
-{
-  struct inferior_list_entry *lp;
-  CORE_ADDR addr;
-  /* Only need to update the threads of this process.  */
-  int pid = pid_of (get_thread_lwp (current_inferior));
-
-  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
-    fatal ("Invalid debug register %d", regnum);
-
-  addr = state->dr_mirror[regnum];
-
-  for (lp = all_lwps.head; lp; lp = lp->next)
-    {
-      struct lwp_info *lwp = (struct lwp_info *) lp;
-
-      /* The actual update is done later, we just mark that the register
-	 needs updating.  */
-      if (pid_of (lwp) == pid)
-	lwp->arch_private->debug_registers_changed = 1;
-    }
-}
-
-/* Update the inferior's DR7 debug control register from STATE.  */
+/* Low-level function vector.  */
+struct x86_dr_low_type x86_dr_low =
+  {
+    x86_linux_dr_set_control,
+    x86_linux_dr_set_addr,
+    x86_linux_dr_get_addr,
+    x86_linux_dr_get_status,
+    x86_linux_dr_get_control,
+    sizeof (void *),
+  };
+
+/* Breakpoint/Watchpoint support.  */
 
-void
-i386_dr_low_set_control (const struct i386_debug_reg_state *state)
+static int
+x86_supports_z_point_type (char z_type)
 {
-  struct inferior_list_entry *lp;
-  /* Only need to update the threads of this process.  */
-  int pid = pid_of (get_thread_lwp (current_inferior));
-
-  for (lp = all_lwps.head; lp; lp = lp->next)
+  switch (z_type)
     {
-      struct lwp_info *lwp = (struct lwp_info *) lp;
-
-      /* The actual update is done later, we just mark that the register
-	 needs updating.  */
-      if (pid_of (lwp) == pid)
-	lwp->arch_private->debug_registers_changed = 1;
+    case Z_PACKET_SW_BP:
+    case Z_PACKET_HW_BP:
+    case Z_PACKET_WRITE_WP:
+    case Z_PACKET_ACCESS_WP:
+      return 1;
+    default:
+      return 0;
     }
 }
 
-/* Get the value of the DR6 debug status register from the inferior
-   and record it in STATE.  */
-
-void
-i386_dr_low_get_status (struct i386_debug_reg_state *state)
-{
-  struct lwp_info *lwp = get_thread_lwp (current_inferior);
-  ptid_t ptid = ptid_of (lwp);
-
-  state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
-}
-
-/* Watchpoint support.  */
-
 static int
-x86_insert_point (char type, CORE_ADDR addr, int len)
+x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
+		  int size, struct raw_breakpoint *bp)
 {
   struct process_info *proc = current_process ();
+
   switch (type)
     {
-    case '2':
-    case '3':
-    case '4':
-      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
-					 type, addr, len);
+    case raw_bkpt_type_hw:
+    case raw_bkpt_type_write_wp:
+    case raw_bkpt_type_access_wp:
+      {
+	enum target_hw_bp_type hw_type
+	  = raw_bkpt_type_to_target_hw_bp_type (type);
+	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;
+
+	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
+      }
+
     default:
      /* Unsupported.  */
       return 1;
@@ -445,16 +563,24 @@ x86_insert_point (char type, CORE_ADDR addr, int len)
 }
 
 static int
-x86_remove_point (char type, CORE_ADDR addr, int len)
+x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
+		  int size, struct raw_breakpoint *bp)
 {
   struct process_info *proc = current_process ();
+
   switch (type)
     {
-    case '2':
-    case '3':
-    case '4':
-      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
-					 type, addr, len);
+    case raw_bkpt_type_hw:
+    case raw_bkpt_type_write_wp:
+    case raw_bkpt_type_access_wp:
+      {
+	enum target_hw_bp_type hw_type
+	  = raw_bkpt_type_to_target_hw_bp_type (type);
+	struct x86_debug_reg_state *state
+	  = &proc->priv->arch_private->debug_reg_state;
+
+	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
+      }
     default:
       /* Unsupported.  */
       return 1;
@@ -465,7 +591,7 @@ static int
 x86_stopped_by_watchpoint (void)
 {
   struct process_info *proc = current_process ();
-  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
+  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
 }
 
 static CORE_ADDR
@@ -473,8 +599,8 @@ x86_stopped_data_address (void)
 {
   struct process_info *proc = current_process ();
   CORE_ADDR addr;
-  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
-				     &addr))
+  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
+				   &addr))
     return addr;
   return 0;
 }
@@ -484,46 +610,49 @@ x86_stopped_data_address (void)
 static struct arch_process_info *
 x86_linux_new_process (void)
 {
-  struct arch_process_info *info = xcalloc (1, sizeof (*info));
+  struct arch_process_info *info = XCNEW (struct arch_process_info);
 
-  i386_low_init_dregs (&info->debug_reg_state);
+  x86_low_init_dregs (&info->debug_reg_state);
 
   return info;
 }
 
-/* Called when a new thread is detected.  */
+/* Target routine for linux_new_fork.  */
 
-static struct arch_lwp_info *
-x86_linux_new_thread (void)
+static void
+x86_linux_new_fork (struct process_info *parent, struct process_info *child)
 {
-  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
-
-  info->debug_registers_changed = 1;
-
-  return info;
+  /* These are allocated by linux_add_process.  */
+  gdb_assert (parent->priv != NULL
+	      && parent->priv->arch_private != NULL);
+  gdb_assert (child->priv != NULL
+	      && child->priv->arch_private != NULL);
+
+  /* Linux kernel before 2.6.33 commit
+     72f674d203cd230426437cdcf7dd6f681dad8b0d
+     will inherit hardware debug registers from parent
+     on fork/vfork/clone.  Newer Linux kernels create such tasks with
+     zeroed debug registers.
+
+     GDB core assumes the child inherits the watchpoints/hw
+     breakpoints of the parent, and will remove them all from the
+     forked off process.  Copy the debug registers mirrors into the
+     new process so that all breakpoints and watchpoints can be
+     removed together.  The debug registers mirror will become zeroed
+     in the end before detaching the forked off process, thus making
+     this compatible with older Linux kernels too.  */
+
+  *child->priv->arch_private = *parent->priv->arch_private;
 }
 
-/* Called when resuming a thread.
-   If the debug regs have changed, update the thread's copies.  */
+/* See nat/x86-dregs.h.  */
 
-static void
-x86_linux_prepare_to_resume (struct lwp_info *lwp)
+struct x86_debug_reg_state *
+x86_debug_reg_state (pid_t pid)
 {
-  if (lwp->arch_private->debug_registers_changed)
-    {
-      int i;
-      ptid_t ptid = ptid_of (lwp);
-      int pid = ptid_get_pid (ptid);
-      struct process_info *proc = find_process_pid (pid);
-      struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;
-
-      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
-	x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
+  struct process_info *proc = find_process_pid (pid);
 
-      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
-
-      lwp->arch_private->debug_registers_changed = 0;
-    }
+  return &proc->priv->arch_private->debug_reg_state;
 }
 
 /* When GDBSERVER is built as a 64-bit application on linux, the
@@ -532,313 +661,2212 @@ as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
    conversion in-place ourselves.  */
 
-/* These types below (compat_*) define a siginfo type that is layout
-   compatible with the siginfo type exported by the 32-bit userspace
-   support.  */
+/* Convert a ptrace/host siginfo object, into/from the siginfo in the
+   layout of the inferiors' architecture.  Returns true if any
+   conversion was done; false otherwise.  If DIRECTION is 1, then copy
+   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
+   INF.  */
 
+static int
+x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
+{
 #ifdef __x86_64__
+  unsigned int machine;
+  int tid = lwpid_of (current_thread);
+  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
 
-typedef int compat_int_t;
-typedef unsigned int compat_uptr_t;
+  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
+  if (!is_64bit_tdesc ())
+    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
+					     FIXUP_32);
+  /* No fixup for native x32 GDB.  */
+  else if (!is_elf64 && sizeof (void *) == 8)
+    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
+					     FIXUP_X32);
+#endif
 
-typedef int compat_time_t;
-typedef int compat_timer_t;
-typedef int compat_clock_t;
+  return 0;
+}
+
+static int use_xml;
 
-struct compat_timeval
-{
-  compat_time_t tv_sec;
-  int tv_usec;
-};
+/* Format of XSAVE extended state is:
+	struct
+	{
+	  fxsave_bytes[0..463]
+	  sw_usable_bytes[464..511]
+	  xstate_hdr_bytes[512..575]
+	  avx_bytes[576..831]
+	  future_state etc
+	};
+
+  Same memory layout will be used for the coredump NT_X86_XSTATE
+  representing the XSAVE extended state registers.
+
+  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
+  extended state mask, which is the same as the extended control register
+  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
+  together with the mask saved in the xstate_hdr_bytes to determine what
+  states the processor/OS supports and what state, used or initialized,
+  the process/thread is in.  */
+#define I386_LINUX_XSAVE_XCR0_OFFSET 464
+
+/* Does the current host support the GETFPXREGS request?  The header
+   file may or may not define it, and even if it is defined, the
+   kernel will return EIO if it's running on a pre-SSE processor.  */
+int have_ptrace_getfpxregs =
+#ifdef HAVE_PTRACE_GETFPXREGS
+  -1
+#else
+  0
+#endif
+;
 
-typedef union compat_sigval
-{
-  compat_int_t sival_int;
-  compat_uptr_t sival_ptr;
-} compat_sigval_t;
+/* Get Linux/x86 target description from running target.  */
 
-typedef struct compat_siginfo
+static const struct target_desc *
+x86_linux_read_description (void)
 {
-  int si_signo;
-  int si_errno;
-  int si_code;
-
-  union
-  {
-    int _pad[((128 / sizeof (int)) - 3)];
+  unsigned int machine;
+  int is_elf64;
+  int xcr0_features;
+  int tid;
+  static uint64_t xcr0;
+  struct regset_info *regset;
 
-    /* kill() */
-    struct
-    {
-      unsigned int _pid;
-      unsigned int _uid;
-    } _kill;
+  tid = lwpid_of (current_thread);
 
-    /* POSIX.1b timers */
-    struct
-    {
-      compat_timer_t _tid;
-      int _overrun;
-      compat_sigval_t _sigval;
-    } _timer;
+  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
 
-    /* POSIX.1b signals */
-    struct
+  if (sizeof (void *) == 4)
     {
-      unsigned int _pid;
-      unsigned int _uid;
-      compat_sigval_t _sigval;
-    } _rt;
+      if (is_elf64 > 0)
+       error (_("Can't debug 64-bit process with 32-bit GDBserver"));
+#ifndef __x86_64__
+      else if (machine == EM_X86_64)
+       error (_("Can't debug x86-64 process with 32-bit GDBserver"));
+#endif
+    }
 
-    /* SIGCHLD */
-    struct
+#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
+  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
     {
-      unsigned int _pid;
-      unsigned int _uid;
-      int _status;
-      compat_clock_t _utime;
-      compat_clock_t _stime;
-    } _sigchld;
+      elf_fpxregset_t fpxregs;
 
-    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
-    struct
-    {
-      unsigned int _addr;
-    } _sigfault;
+      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
+	{
+	  have_ptrace_getfpxregs = 0;
+	  have_ptrace_getregset = 0;
+	  return i386_linux_read_description (X86_XSTATE_X87);
+	}
+      else
+	have_ptrace_getfpxregs = 1;
+    }
+#endif
 
-    /* SIGPOLL */
-    struct
+  if (!use_xml)
     {
-      int _band;
-      int _fd;
-    } _sigpoll;
-  } _sifields;
-} compat_siginfo_t;
+      x86_xcr0 = X86_XSTATE_SSE_MASK;
 
-#define cpt_si_pid _sifields._kill._pid
-#define cpt_si_uid _sifields._kill._uid
-#define cpt_si_timerid _sifields._timer._tid
-#define cpt_si_overrun _sifields._timer._overrun
-#define cpt_si_status _sifields._sigchld._status
-#define cpt_si_utime _sifields._sigchld._utime
-#define cpt_si_stime _sifields._sigchld._stime
-#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
-#define cpt_si_addr _sifields._sigfault._addr
-#define cpt_si_band _sifields._sigpoll._band
-#define cpt_si_fd _sifields._sigpoll._fd
-
-/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
-   In their place is si_timer1,si_timer2.  */
-#ifndef si_timerid
-#define si_timerid si_timer1
-#endif
-#ifndef si_overrun
-#define si_overrun si_timer2
+      /* Don't use XML.  */
+#ifdef __x86_64__
+      if (machine == EM_X86_64)
+	return tdesc_amd64_linux_no_xml;
+      else
 #endif
+	return tdesc_i386_linux_no_xml;
+    }
 
-static void
-compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
-{
-  memset (to, 0, sizeof (*to));
+  if (have_ptrace_getregset == -1)
+    {
+      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
+      struct iovec iov;
 
-  to->si_signo = from->si_signo;
-  to->si_errno = from->si_errno;
-  to->si_code = from->si_code;
+      iov.iov_base = xstateregs;
+      iov.iov_len = sizeof (xstateregs);
 
-  if (to->si_code < 0)
-    {
-      to->cpt_si_ptr = (intptr_t) from->si_ptr;
-    }
-  else if (to->si_code == SI_USER)
-    {
-      to->cpt_si_pid = from->si_pid;
-      to->cpt_si_uid = from->si_uid;
-    }
-  else if (to->si_code == SI_TIMER)
-    {
-      to->cpt_si_timerid = from->si_timerid;
-      to->cpt_si_overrun = from->si_overrun;
-      to->cpt_si_ptr = (intptr_t) from->si_ptr;
-    }
-  else
-    {
-      switch (to->si_signo)
+      /* Check if PTRACE_GETREGSET works.  */
+      if (ptrace (PTRACE_GETREGSET, tid,
+		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
+	have_ptrace_getregset = 0;
+      else
 	{
-	case SIGCHLD:
-	  to->cpt_si_pid = from->si_pid;
-	  to->cpt_si_uid = from->si_uid;
-	  to->cpt_si_status = from->si_status;
-	  to->cpt_si_utime = from->si_utime;
-	  to->cpt_si_stime = from->si_stime;
-	  break;
-	case SIGILL:
-	case SIGFPE:
-	case SIGSEGV:
-	case SIGBUS:
-	  to->cpt_si_addr = (intptr_t) from->si_addr;
-	  break;
-	case SIGPOLL:
-	  to->cpt_si_band = from->si_band;
-	  to->cpt_si_fd = from->si_fd;
-	  break;
-	default:
-	  to->cpt_si_pid = from->si_pid;
-	  to->cpt_si_uid = from->si_uid;
-	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
-	  break;
+	  have_ptrace_getregset = 1;
+
+	  /* Get XCR0 from XSAVE extended state.  */
+	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
+			     / sizeof (uint64_t))];
+
+	  /* Use PTRACE_GETREGSET if it is available.  */
+	  for (regset = x86_regsets;
+	       regset->fill_function != NULL; regset++)
+	    if (regset->get_request == PTRACE_GETREGSET)
+	      regset->size = X86_XSTATE_SIZE (xcr0);
+	    else if (regset->type != GENERAL_REGS)
+	      regset->size = 0;
 	}
     }
-}
 
-static void
-siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
-{
-  memset (to, 0, sizeof (*to));
+  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
+  xcr0_features = (have_ptrace_getregset
+		   && (xcr0 & X86_XSTATE_ALL_MASK));
 
-  to->si_signo = from->si_signo;
-  to->si_errno = from->si_errno;
-  to->si_code = from->si_code;
+  if (xcr0_features)
+    x86_xcr0 = xcr0;
 
-  if (to->si_code < 0)
-    {
-      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
-    }
-  else if (to->si_code == SI_USER)
-    {
-      to->si_pid = from->cpt_si_pid;
-      to->si_uid = from->cpt_si_uid;
-    }
-  else if (to->si_code == SI_TIMER)
+  if (machine == EM_X86_64)
     {
-      to->si_timerid = from->cpt_si_timerid;
-      to->si_overrun = from->cpt_si_overrun;
-      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
+#ifdef __x86_64__
+      const target_desc *tdesc = NULL;
+
+      if (xcr0_features)
+	{
+	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
+						!is_elf64);
+	}
+
+      if (tdesc == NULL)
+	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
+      return tdesc;
+#endif
     }
   else
     {
-      switch (to->si_signo)
-	{
-	case SIGCHLD:
-	  to->si_pid = from->cpt_si_pid;
-	  to->si_uid = from->cpt_si_uid;
-	  to->si_status = from->cpt_si_status;
-	  to->si_utime = from->cpt_si_utime;
-	  to->si_stime = from->cpt_si_stime;
-	  break;
-	case SIGILL:
-	case SIGFPE:
-	case SIGSEGV:
-	case SIGBUS:
-	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
-	  break;
-	case SIGPOLL:
-	  to->si_band = from->cpt_si_band;
-	  to->si_fd = from->cpt_si_fd;
-	  break;
-	default:
-	  to->si_pid = from->cpt_si_pid;
-	  to->si_uid = from->cpt_si_uid;
-	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
-	  break;
-	}
+      const target_desc *tdesc = NULL;
+
+      if (xcr0_features)
+	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
+
+      if (tdesc == NULL)
+	tdesc = i386_linux_read_description (X86_XSTATE_SSE);
+
+      return tdesc;
     }
-}
 
-#endif /* __x86_64__ */
+  gdb_assert_not_reached ("failed to return tdesc");
+}
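(Editorial aside, not part of the patch: a minimal standalone sketch of the XCR0 probe performed above, assuming a stopped tracee TID. `probe_xcr0' is a hypothetical name; the 464-byte offset is I386_LINUX_XSAVE_XCR0_OFFSET, and errors are reduced to a -1 return:)

    #include <stdint.h>
    #include <string.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <elf.h>		/* NT_X86_XSTATE */

    static int
    probe_xcr0 (pid_t tid, uint64_t *xcr0)
    {
      /* The legacy fxsave area (512 bytes) plus the xstate header
         (64 bytes) is enough to reach the software-usable bytes at
         offset 464, where Linux stashes XCR0.  */
      uint64_t xstate[576 / sizeof (uint64_t)];
      struct iovec iov = { xstate, sizeof (xstate) };

      if (ptrace (PTRACE_GETREGSET, tid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
        return -1;		/* no XSAVE support in kernel or CPU */

      memcpy (xcr0, (char *) xstate + 464, sizeof *xcr0);
      return 0;
    }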
 
-/* Convert a native/host siginfo object, into/from the siginfo in the
-   layout of the inferiors' architecture.  Returns true if any
-   conversion was done; false otherwise.  If DIRECTION is 1, then copy
-   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
-   INF.  */
+/* Callback for find_inferior.  Stops iteration when a thread with a
+   given PID is found.  */
 
 static int
-x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
+same_process_callback (struct inferior_list_entry *entry, void *data)
 {
-#ifdef __x86_64__
-  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
-  if (register_size (0) == 4)
-    {
-      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
-	fatal ("unexpected difference in siginfo");
+  int pid = *(int *) data;
 
-      if (direction == 0)
-	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
-      else
-	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
+  return (ptid_get_pid (entry->id) == pid);
+}
 
-      return 1;
-    }
-#endif
-
-  return 0;
-}
-
-/* Initialize gdbserver for the architecture of the inferior.  */
+/* Callback for for_each_inferior.  Calls the arch_setup routine for
+   each process.  */
 
 static void
-x86_arch_setup (void)
+x86_arch_setup_process_callback (struct inferior_list_entry *entry)
 {
-#ifdef __x86_64__
-  int pid = pid_of (get_thread_lwp (current_inferior));
-  char *file = linux_child_pid_to_exec_file (pid);
-  int use_64bit = elf_64_file_p (file);
-
-  free (file);
-
-  if (use_64bit < 0)
-    {
-      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
-    }
-  else if (use_64bit)
-    {
-      init_registers_x86_64_linux ();
+  int pid = ptid_get_pid (entry->id);
 
-      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
-      the_low_target.num_regs = -1;
-      the_low_target.regmap = NULL;
-      the_low_target.cannot_fetch_register = NULL;
-      the_low_target.cannot_store_register = NULL;
+  /* Look up any thread of this processes.  */
+  current_thread
+    = (struct thread_info *) find_inferior (&all_threads,
+					    same_process_callback, &pid);
 
-      /* Amd64 has 16 xmm regs.  */
-      num_xmm_registers = 16;
+  the_low_target.arch_setup ();
+}
 
-      return;
-    }
-#endif
+/* Update all the target description of all processes; a new GDB
+   connected, and it may or not support xml target descriptions.  */
 
-  /* Ok we have a 32-bit inferior.  */
+static void
+x86_linux_update_xmltarget (void)
+{
+  struct thread_info *saved_thread = current_thread;
 
-  init_registers_i386_linux ();
+  /* Before changing the register cache's internal layout, flush the
+     contents of the current valid caches back to the threads, and
+     release the current regcache objects.  */
+  regcache_release ();
 
-  the_low_target.num_regs = I386_NUM_REGS;
-  the_low_target.regmap = i386_regmap;
-  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
-  the_low_target.cannot_store_register = i386_cannot_store_register;
+  for_each_inferior (&all_processes, x86_arch_setup_process_callback);
 
-  /* I386 has 8 xmm regs.  */
-  num_xmm_registers = 8;
+  current_thread = saved_thread;
 }
 
-/* This is initialized assuming an amd64 target.
-   x86_arch_setup will correct it for i386 or amd64 targets.  */
+/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
+   PTRACE_GETREGSET.  */
 
-struct linux_target_ops the_low_target =
+static void
+x86_linux_process_qsupported (char **features, int count)
+{
+  int i;
+
+  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
+     with "i386" in qSupported query, it supports x86 XML target
+     descriptions.  */
+  use_xml = 0;
+  for (i = 0; i < count; i++)
+    {
+      const char *feature = features[i];
+
+      if (startswith (feature, "xmlRegisters="))
+	{
+	  char *copy = xstrdup (feature + 13);
+	  char *p;
+
+	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
+	    {
+	      if (strcmp (p, "i386") == 0)
+		{
+		  use_xml = 1;
+		  break;
+		}
+	    }
+
+	  free (copy);
+	}
+    }
+  x86_linux_update_xmltarget ();
+}
+
+/* Common for x86/x86-64.  */
+
+static struct regsets_info x86_regsets_info =
+  {
+    x86_regsets, /* regsets */
+    0, /* num_regsets */
+    NULL, /* disabled_regsets */
+  };
+
+#ifdef __x86_64__
+static struct regs_info amd64_linux_regs_info =
+  {
+    NULL, /* regset_bitmap */
+    NULL, /* usrregs_info */
+    &x86_regsets_info
+  };
+#endif
+static struct usrregs_info i386_linux_usrregs_info =
+  {
+    I386_NUM_REGS,
+    i386_regmap,
+  };
+
+static struct regs_info i386_linux_regs_info =
+  {
+    NULL, /* regset_bitmap */
+    &i386_linux_usrregs_info,
+    &x86_regsets_info
+  };
+
+const struct regs_info *
+x86_linux_regs_info (void)
+{
+#ifdef __x86_64__
+  if (is_64bit_tdesc ())
+    return &amd64_linux_regs_info;
+  else
+#endif
+    return &i386_linux_regs_info;
+}
+
+/* Initialize the target description for the architecture of the
+   inferior.  */
+
+static void
+x86_arch_setup (void)
+{
+  current_process ()->tdesc = x86_linux_read_description ();
+}
+
+/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
+   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */
+
+static void
+x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
+{
+  int use_64bit = register_size (regcache->tdesc, 0) == 8;
+
+  if (use_64bit)
+    {
+      long l_sysno;
+
+      collect_register_by_name (regcache, "orig_rax", &l_sysno);
+      *sysno = (int) l_sysno;
+    }
+  else
+    collect_register_by_name (regcache, "orig_eax", sysno);
+}
+
+static int
+x86_supports_tracepoints (void)
+{
+  return 1;
+}
+
+static void
+append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
+{
+  write_inferior_memory (*to, buf, len);
+  *to += len;
+}
+
+static int
+push_opcode (unsigned char *buf, const char *op)
+{
+  unsigned char *buf_org = buf;
+
+  while (1)
+    {
+      char *endptr;
+      unsigned long ul = strtoul (op, &endptr, 16);
+
+      if (endptr == op)
+	break;
+
+      *buf++ = ul;
+      op = endptr;
+    }
+
+  return buf - buf_org;
+}
+
+#ifdef __x86_64__
+
+/* Build a jump pad that saves registers and calls a collection
+   function.  Writes a jump instruction to the jump pad to
+   JJUMPAD_INSN.  The caller is responsible to write it in at the
+   tracepoint address.  */
+
+static int
+amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+					CORE_ADDR collector,
+					CORE_ADDR lockaddr,
+					ULONGEST orig_size,
+					CORE_ADDR *jump_entry,
+					CORE_ADDR *trampoline,
+					ULONGEST *trampoline_size,
+					unsigned char *jjump_pad_insn,
+					ULONGEST *jjump_pad_insn_size,
+					CORE_ADDR *adjusted_insn_addr,
+					CORE_ADDR *adjusted_insn_addr_end,
+					char *err)
+{
+  unsigned char buf[40];
+  int i, offset;
+  int64_t loffset;
+
+  CORE_ADDR buildaddr = *jump_entry;
+
+  /* Build the jump pad.  */
+
+  /* First, do tracepoint data collection.  Save registers.  */
+  i = 0;
+  /* Need to ensure stack pointer saved first.  */
+  buf[i++] = 0x54; /* push %rsp */
+  buf[i++] = 0x55; /* push %rbp */
+  buf[i++] = 0x57; /* push %rdi */
+  buf[i++] = 0x56; /* push %rsi */
+  buf[i++] = 0x52; /* push %rdx */
+  buf[i++] = 0x51; /* push %rcx */
+  buf[i++] = 0x53; /* push %rbx */
+  buf[i++] = 0x50; /* push %rax */
+  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
+  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
+  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
+  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
+  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
+  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
+  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
+  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
+  buf[i++] = 0x9c; /* pushfq */
+  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
+  buf[i++] = 0xbf;
+  memcpy (buf + i, &tpaddr, 8);
+  i += 8;
+  buf[i++] = 0x57; /* push %rdi */
+  append_insns (&buildaddr, i, buf);
+
+  /* Stack space for the collecting_t object.  */
+  i = 0;
+  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
+  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
+  memcpy (buf + i, &tpoint, 8);
+  i += 8;
+  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
+  i += push_opcode (&buf[i],
+		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
+  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
+  append_insns (&buildaddr, i, buf);
+
+  /* spin-lock.  */
+  i = 0;
+  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
+  memcpy (&buf[i], (void *) &lockaddr, 8);
+  i += 8;
+  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
+  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
+  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
+  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
+  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
+  append_insns (&buildaddr, i, buf);
+
+  /* Set up the gdb_collect call.  */
+  /* At this point, (stack pointer + 0x18) is the base of our saved
+     register block.  */
+
+  i = 0;
+  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
+  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */
+
+  /* tpoint address may be 64-bit wide.  */
+  i += push_opcode (&buf[i], "48 bf");		/* movl <tpoint>,%rdi */
+  memcpy (buf + i, &tpoint, 8);
+  i += 8;
+  append_insns (&buildaddr, i, buf);
+
+  /* The collector function being in the shared library, may be
+     >31-bits away off the jump pad.  */
+  i = 0;
+  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
+  memcpy (buf + i, &collector, 8);
+  i += 8;
+  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
+  append_insns (&buildaddr, i, buf);
+
+  /* Clear the spin-lock.  */
+  i = 0;
+  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
+  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
+  memcpy (buf + i, &lockaddr, 8);
+  i += 8;
+  append_insns (&buildaddr, i, buf);
+
+  /* Remove stack that had been used for the collect_t object.  */
+  i = 0;
+  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
+  append_insns (&buildaddr, i, buf);
+
+  /* Restore register state.  */
+  i = 0;
+  buf[i++] = 0x48; /* add $0x8,%rsp */
+  buf[i++] = 0x83;
+  buf[i++] = 0xc4;
+  buf[i++] = 0x08;
+  buf[i++] = 0x9d; /* popfq */
+  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
+  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
+  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
+  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
+  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
+  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
+  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
+  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
+  buf[i++] = 0x58; /* pop %rax */
+  buf[i++] = 0x5b; /* pop %rbx */
+  buf[i++] = 0x59; /* pop %rcx */
+  buf[i++] = 0x5a; /* pop %rdx */
+  buf[i++] = 0x5e; /* pop %rsi */
+  buf[i++] = 0x5f; /* pop %rdi */
+  buf[i++] = 0x5d; /* pop %rbp */
+  buf[i++] = 0x5c; /* pop %rsp */
+  append_insns (&buildaddr, i, buf);
+
+  /* Now, adjust the original instruction to execute in the jump
+     pad.  */
+  *adjusted_insn_addr = buildaddr;
+  relocate_instruction (&buildaddr, tpaddr);
+  *adjusted_insn_addr_end = buildaddr;
+
+  /* Finally, write a jump back to the program.  */
+
+  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
+  if (loffset > INT_MAX || loffset < INT_MIN)
+    {
+      sprintf (err,
+	       "E.Jump back from jump pad too far from tracepoint "
+	       "(offset 0x%" PRIx64 " > int32).", loffset);
+      return 1;
+    }
+
+  offset = (int) loffset;
+  memcpy (buf, jump_insn, sizeof (jump_insn));
+  memcpy (buf + 1, &offset, 4);
+  append_insns (&buildaddr, sizeof (jump_insn), buf);
+
+  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
+     is always done last (by our caller actually), so that we can
+     install fast tracepoints with threads running.  This relies on
+     the agent's atomic write support.  */
+  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
+  if (loffset > INT_MAX || loffset < INT_MIN)
+    {
+      sprintf (err,
+	       "E.Jump pad too far from tracepoint "
+	       "(offset 0x%" PRIx64 " > int32).", loffset);
+      return 1;
+    }
+
+  offset = (int) loffset;
+
+  memcpy (buf, jump_insn, sizeof (jump_insn));
+  memcpy (buf + 1, &offset, 4);
+  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
+  *jjump_pad_insn_size = sizeof (jump_insn);
+
+  /* Return the end address of our pad.  */
+  *jump_entry = buildaddr;
+
+  return 0;
+}
+
+#endif /* __x86_64__ */
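(Editorial aside, not part of the patch: both jump-pad builders patch 5-byte `jmp rel32' instructions — opcode 0xe9, the jump_insn template above. The displacement is measured from the end of the instruction and must fit in a signed 32 bits, which is what the INT_MAX/INT_MIN guards check. A minimal sketch of that arithmetic, with a hypothetical encode_jmp_rel32 helper:)

    #include <limits.h>
    #include <stdint.h>
    #include <string.h>

    static int
    encode_jmp_rel32 (uint64_t insn_addr, uint64_t target,
		      unsigned char buf[5])
    {
      /* rel32 is relative to the address of the *next* instruction,
	 i.e. insn_addr + 5.  */
      int64_t disp = (int64_t) (target - (insn_addr + 5));
      int32_t disp32;

      if (disp > INT_MAX || disp < INT_MIN)
	return -1;		/* target out of rel32 range */

      disp32 = (int32_t) disp;
      buf[0] = 0xe9;		/* jmp rel32 */
      memcpy (buf + 1, &disp32, 4);
      return 0;
    }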
+
+/* Build a jump pad that saves registers and calls a collection
+   function.  Writes a jump instruction to the jump pad to
+   JJUMPAD_INSN.  The caller is responsible to write it in at the
+   tracepoint address.  */
+
+static int
+i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+				       CORE_ADDR collector,
+				       CORE_ADDR lockaddr,
+				       ULONGEST orig_size,
+				       CORE_ADDR *jump_entry,
+				       CORE_ADDR *trampoline,
+				       ULONGEST *trampoline_size,
+				       unsigned char *jjump_pad_insn,
+				       ULONGEST *jjump_pad_insn_size,
+				       CORE_ADDR *adjusted_insn_addr,
+				       CORE_ADDR *adjusted_insn_addr_end,
+				       char *err)
+{
+  unsigned char buf[0x100];
+  int i, offset;
+  CORE_ADDR buildaddr = *jump_entry;
+
+  /* Build the jump pad.  */
+
+  /* First, do tracepoint data collection.  Save registers.  */
+  i = 0;
+  buf[i++] = 0x60; /* pushad */
+  buf[i++] = 0x68; /* push tpaddr aka $pc */
+  *((int *)(buf + i)) = (int) tpaddr;
+  i += 4;
+  buf[i++] = 0x9c; /* pushf */
+  buf[i++] = 0x1e; /* push %ds */
+  buf[i++] = 0x06; /* push %es */
+  buf[i++] = 0x0f; /* push %fs */
+  buf[i++] = 0xa0;
+  buf[i++] = 0x0f; /* push %gs */
+  buf[i++] = 0xa8;
+  buf[i++] = 0x16; /* push %ss */
+  buf[i++] = 0x0e; /* push %cs */
+  append_insns (&buildaddr, i, buf);
+
+  /* Stack space for the collecting_t object.  */
+  i = 0;
+  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
+
+  /* Build the object.  */
+  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
+  memcpy (buf + i, &tpoint, 4);
+  i += 4;
+  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */
+
+  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
+  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
+  append_insns (&buildaddr, i, buf);
+
+  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
+     If we cared for it, this could be using xchg alternatively.  */
+
+  i = 0;
+  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
+  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
+						   %esp,<lockaddr> */
+  memcpy (&buf[i], (void *) &lockaddr, 4);
+  i += 4;
+  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
+  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
+  append_insns (&buildaddr, i, buf);
+
+
+  /* Set up arguments to the gdb_collect call.  */
+  i = 0;
+  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
+  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
+  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
+  append_insns (&buildaddr, i, buf);
+
+  i = 0;
+  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
+  append_insns (&buildaddr, i, buf);
+
+  i = 0;
+  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
+  memcpy (&buf[i], (void *) &tpoint, 4);
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+
+  buf[0] = 0xe8; /* call <reladdr> */
+  offset = collector - (buildaddr + sizeof (jump_insn));
+  memcpy (buf + 1, &offset, 4);
+  append_insns (&buildaddr, 5, buf);
+  /* Clean up after the call.  */
+  buf[0] = 0x83; /* add $0x8,%esp */
+  buf[1] = 0xc4;
+  buf[2] = 0x08;
+  append_insns (&buildaddr, 3, buf);
+
+
+  /* Clear the spin-lock.  This would need the LOCK prefix on older
+     broken archs.  */
+  i = 0;
+  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
+  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
+  memcpy (buf + i, &lockaddr, 4);
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+
+
+  /* Remove stack that had been used for the collect_t object.  */
+  i = 0;
+  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
+  append_insns (&buildaddr, i, buf);
+
+  i = 0;
+  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
+  buf[i++] = 0xc4;
+  buf[i++] = 0x04;
+  buf[i++] = 0x17; /* pop %ss */
+  buf[i++] = 0x0f; /* pop %gs */
+  buf[i++] = 0xa9;
+  buf[i++] = 0x0f; /* pop %fs */
+  buf[i++] = 0xa1;
+  buf[i++] = 0x07; /* pop %es */
+  buf[i++] = 0x1f; /* pop %ds */
+  buf[i++] = 0x9d; /* popf */
+  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
+  buf[i++] = 0xc4;
+  buf[i++] = 0x04;
+  buf[i++] = 0x61; /* popad */
+  append_insns (&buildaddr, i, buf);
+
+  /* Now, adjust the original instruction to execute in the jump
+     pad.  */
+  *adjusted_insn_addr = buildaddr;
+  relocate_instruction (&buildaddr, tpaddr);
+  *adjusted_insn_addr_end = buildaddr;
+
+  /* Write the jump back to the program.  */
+  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
+  memcpy (buf, jump_insn, sizeof (jump_insn));
+  memcpy (buf + 1, &offset, 4);
+  append_insns (&buildaddr, sizeof (jump_insn), buf);
+
+  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
+     is always done last (by our caller actually), so that we can
+     install fast tracepoints with threads running.  This relies on
+     the agent's atomic write support.  */
+  if (orig_size == 4)
+    {
+      /* Create a trampoline.  */
+      *trampoline_size = sizeof (jump_insn);
+      if (!claim_trampoline_space (*trampoline_size, trampoline))
+	{
+	  /* No trampoline space available.  */
+	  strcpy (err,
+		  "E.Cannot allocate trampoline space needed for fast "
+		  "tracepoints on 4-byte instructions.");
+	  return 1;
+	}
+
+      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
+      memcpy (buf, jump_insn, sizeof (jump_insn));
+      memcpy (buf + 1, &offset, 4);
+      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
+
+      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
+      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
+      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
+      memcpy (buf + 2, &offset, 2);
+      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
+      *jjump_pad_insn_size = sizeof (small_jump_insn);
+    }
+  else
+    {
+      /* Else use a 32-bit relative jump instruction.  */
+      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
+      memcpy (buf, jump_insn, sizeof (jump_insn));
+      memcpy (buf + 1, &offset, 4);
+      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
+      *jjump_pad_insn_size = sizeof (jump_insn);
+    }
+
+  /* Return the end address of our pad.  */
+  *jump_entry = buildaddr;
+
+  return 0;
+}
+
+static int
+x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+				      CORE_ADDR collector,
+				      CORE_ADDR lockaddr,
+				      ULONGEST orig_size,
+				      CORE_ADDR *jump_entry,
+				      CORE_ADDR *trampoline,
+				      ULONGEST *trampoline_size,
+				      unsigned char *jjump_pad_insn,
+				      ULONGEST *jjump_pad_insn_size,
+				      CORE_ADDR *adjusted_insn_addr,
+				      CORE_ADDR *adjusted_insn_addr_end,
+				      char *err)
+{
+#ifdef __x86_64__
+  if (is_64bit_tdesc ())
+    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
+						   collector, lockaddr,
+						   orig_size, jump_entry,
+						   trampoline, trampoline_size,
+						   jjump_pad_insn,
+						   jjump_pad_insn_size,
+						   adjusted_insn_addr,
+						   adjusted_insn_addr_end,
+						   err);
+#endif
+
+  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
+						collector, lockaddr,
+						orig_size, jump_entry,
+						trampoline, trampoline_size,
+						jjump_pad_insn,
+						jjump_pad_insn_size,
+						adjusted_insn_addr,
+						adjusted_insn_addr_end,
+						err);
+}
+
+/* Return the minimum instruction length for fast tracepoints on x86/x86-64
+   architectures.  */
+
+static int
+x86_get_min_fast_tracepoint_insn_len (void)
+{
+  static int warned_about_fast_tracepoints = 0;
+
+#ifdef __x86_64__
+  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
+     used for fast tracepoints.  */
+  if (is_64bit_tdesc ())
+    return 5;
+#endif
+
+  if (agent_loaded_p ())
+    {
+      char errbuf[IPA_BUFSIZ];
+
+      errbuf[0] = '\0';
+
+      /* On x86, if trampolines are available, then 4-byte jump instructions
+	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
+	 with a 4-byte offset are used instead.  */
+      if (have_fast_tracepoint_trampoline_buffer (errbuf))
+	return 4;
+      else
+	{
+	  /* GDB has no channel to explain to user why a shorter fast
+	     tracepoint is not possible, but at least make GDBserver
+	     mention that something has gone awry.  */
+	  if (!warned_about_fast_tracepoints)
+	    {
+	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
+	      warned_about_fast_tracepoints = 1;
+	    }
+	  return 5;
+	}
+    }
+  else
+    {
+      /* Indicate that the minimum length is currently unknown since the IPA
+	 has not loaded yet.  */
+      return 0;
+    }
+}
+
+static void
+add_insns (unsigned char *start, int len)
+{
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  if (debug_threads)
+    debug_printf ("Adding %d bytes of insn at %s\n",
+		  len, paddress (buildaddr));
+
+  append_insns (&buildaddr, len, start);
+  current_insn_ptr = buildaddr;
+}
+
+/* Our general strategy for emitting code is to avoid specifying raw
+   bytes whenever possible, and instead copy a block of inline asm
+   that is embedded in the function.  This is a little messy, because
+   we need to keep the compiler from discarding what looks like dead
+   code, plus suppress various warnings.  */
+
+#define EMIT_ASM(NAME, INSNS)						\
+  do									\
+    {									\
+      extern unsigned char start_ ## NAME, end_ ## NAME;		\
+      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
+      __asm__ ("jmp end_" #NAME "\n"					\
+	       "\t" "start_" #NAME ":"					\
+	       "\t" INSNS "\n"						\
+	       "\t" "end_" #NAME ":");					\
+    } while (0)
+
+#ifdef __x86_64__
+
+#define EMIT_ASM32(NAME,INSNS)						\
+  do									\
+    {									\
+      extern unsigned char start_ ## NAME, end_ ## NAME;		\
+      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
+      __asm__ (".code32\n"						\
+	       "\t" "jmp end_" #NAME "\n"				\
+	       "\t" "start_" #NAME ":\n"				\
+	       "\t" INSNS "\n"						\
+	       "\t" "end_" #NAME ":\n"					\
+	       ".code64\n");						\
+    } while (0)
+
+#else
+
+#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
+
+#endif
+
+#ifdef __x86_64__
+
+static void
+amd64_emit_prologue (void)
+{
+  EMIT_ASM (amd64_prologue,
+	    "pushq %rbp\n\t"
+	    "movq %rsp,%rbp\n\t"
+	    "sub $0x20,%rsp\n\t"
+	    "movq %rdi,-8(%rbp)\n\t"
+	    "movq %rsi,-16(%rbp)");
+}
+
+
+static void
+amd64_emit_epilogue (void)
+{
+  EMIT_ASM (amd64_epilogue,
+	    "movq -16(%rbp),%rdi\n\t"
+	    "movq %rax,(%rdi)\n\t"
+	    "xor %rax,%rax\n\t"
+	    "leave\n\t"
+	    "ret");
+}
+
+static void
+amd64_emit_add (void)
+{
+  EMIT_ASM (amd64_add,
+	    "add (%rsp),%rax\n\t"
+	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_sub (void)
+{
+  EMIT_ASM (amd64_sub,
+	    "sub %rax,(%rsp)\n\t"
+	    "pop %rax");
+}
+
+static void
+amd64_emit_mul (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_lsh (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_signed (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_unsigned (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM (amd64_ext_8,
+		"cbtw\n\t"
+		"cwtl\n\t"
+		"cltq");
+      break;
+    case 16:
+      EMIT_ASM (amd64_ext_16,
+		"cwtl\n\t"
+		"cltq");
+      break;
+    case 32:
+      EMIT_ASM (amd64_ext_32,
+		"cltq");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+amd64_emit_log_not (void)
+{
+  EMIT_ASM (amd64_log_not,
+	    "test %rax,%rax\n\t"
+	    "sete %cl\n\t"
+	    "movzbq %cl,%rax");
+}
+
+static void
+amd64_emit_bit_and (void)
+{
+  EMIT_ASM (amd64_and,
+	    "and (%rsp),%rax\n\t"
+	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_or (void)
+{
+  EMIT_ASM (amd64_or,
+	    "or (%rsp),%rax\n\t"
+	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_xor (void)
+{
+  EMIT_ASM (amd64_xor,
+	    "xor (%rsp),%rax\n\t"
+	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_not (void)
+{
+  EMIT_ASM (amd64_bit_not,
+	    "xorq $0xffffffffffffffff,%rax");
+}
+
+static void
+amd64_emit_equal (void)
+{
+  EMIT_ASM (amd64_equal,
+	    "cmp %rax,(%rsp)\n\t"
+	    "je .Lamd64_equal_true\n\t"
+	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_signed (void)
+{
+  EMIT_ASM (amd64_less_signed,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jl .Lamd64_less_signed_true\n\t"
+	    "xor %rax,%rax\n\t"
+	    "jmp .Lamd64_less_signed_end\n\t"
+	    ".Lamd64_less_signed_true:\n\t"
+	    "mov $1,%rax\n\t"
+	    ".Lamd64_less_signed_end:\n\t"
+	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_unsigned (void)
+{
+  EMIT_ASM (amd64_less_unsigned,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jb .Lamd64_less_unsigned_true\n\t"
+	    "xor %rax,%rax\n\t"
+	    "jmp .Lamd64_less_unsigned_end\n\t"
+	    ".Lamd64_less_unsigned_true:\n\t"
+	    "mov $1,%rax\n\t"
+	    ".Lamd64_less_unsigned_end:\n\t"
+	    "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_ref (int size)
+{
+  switch (size)
+    {
+    case 1:
+      EMIT_ASM (amd64_ref1,
+		"movb (%rax),%al");
+      break;
+    case 2:
+      EMIT_ASM (amd64_ref2,
+		"movw (%rax),%ax");
+      break;
+    case 4:
+      EMIT_ASM (amd64_ref4,
+		"movl (%rax),%eax");
+      break;
+    case 8:
+      EMIT_ASM (amd64_ref8,
+		"movq (%rax),%rax");
+      break;
+    }
+}
+
+static void
+amd64_emit_if_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_if_goto,
+	    "mov %rax,%rcx\n\t"
+	    "pop %rax\n\t"
+	    "cmp $0,%rcx\n\t"
+	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+  if (offset_p)
+    *offset_p = 10;
+  if (size_p)
+    *size_p = 4;
+}
+
+static void
+amd64_emit_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_goto,
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+  if (offset_p)
+    *offset_p = 1;
+  if (size_p)
+    *size_p = 4;
+}
+
+static void
+amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+  int diff = (to - (from + size));
+  unsigned char buf[sizeof (int)];
+
+  if (size != 4)
+    {
+      emit_error = 1;
+      return;
+    }
+
+  memcpy (buf, &diff, sizeof (int));
+  write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+amd64_emit_const (LONGEST num)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
+  memcpy (&buf[i], &num, sizeof (num));
+  i += 8;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_call (CORE_ADDR fn)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+  LONGEST offset64;
+
+  /* The destination function being in the shared library, may be
+     >31-bits away off the compiled code pad.  */
+
+  buildaddr = current_insn_ptr;
+
+  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
+
+  i = 0;
+
+  if (offset64 > INT_MAX || offset64 < INT_MIN)
+    {
+      /* Offset is too large for a call.  Use callq, but that requires
+	 a register, so avoid it if possible.  Use r10, since it is
+	 call-clobbered, we don't have to push/pop it.  */
+      buf[i++] = 0x48; /* mov $fn,%r10 */
+      buf[i++] = 0xba;
+      memcpy (buf + i, &fn, 8);
+      i += 8;
+      buf[i++] = 0xff; /* callq *%r10 */
+      buf[i++] = 0xd2;
+    }
+  else
+    {
+      int offset32 = offset64; /* we know we can't overflow here.  */
+
+      buf[i++] = 0xe8; /* call */
+      memcpy (buf + i, &offset32, 4);
+      i += 4;
+    }
+
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_reg (int reg)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  /* Assume raw_regs is still in %rdi.  */
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xbe; /* mov $<n>,%esi */
+  memcpy (&buf[i], &reg, sizeof (reg));
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  amd64_emit_call (get_raw_reg_func_addr ());
+}
+
+static void
+amd64_emit_pop (void)
+{
+  EMIT_ASM (amd64_pop,
+	    "pop %rax");
+}
+
+static void
+amd64_emit_stack_flush (void)
+{
+  EMIT_ASM (amd64_stack_flush,
+	    "push %rax");
+}
+
+static void
+amd64_emit_zero_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM (amd64_zero_ext_8,
+		"and $0xff,%rax");
+      break;
+    case 16:
+      EMIT_ASM (amd64_zero_ext_16,
+		"and $0xffff,%rax");
+      break;
+    case 32:
+      EMIT_ASM (amd64_zero_ext_32,
+		"mov $0xffffffff,%rcx\n\t"
+		"and %rcx,%rax");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+amd64_emit_swap (void)
+{
+  EMIT_ASM (amd64_swap,
+	    "mov %rax,%rcx\n\t"
+	    "pop %rax\n\t"
+	    "push %rcx");
+}
+
+static void
+amd64_emit_stack_adjust (int n)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
+  buf[i++] = 0x8d;
+  buf[i++] = 0x64;
+  buf[i++] = 0x24;
+  /* This only handles adjustments up to 16, but we don't expect any more.  */
+  buf[i++] = n * 8;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'.  */
+
+static void
+amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xbf; /* movl $<n>,%edi */
+  memcpy (&buf[i], &arg1, sizeof (arg1));
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  amd64_emit_call (fn);
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'.  */
+
+static void
+amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xbf; /* movl $<n>,%edi */
+  memcpy (&buf[i], &arg1, sizeof (arg1));
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  EMIT_ASM (amd64_void_call_2_a,
+	    /* Save away a copy of the stack top.  */
+	    "push %rax\n\t"
+	    /* Also pass top as the second argument.  */
+	    "mov %rax,%rsi");
+  amd64_emit_call (fn);
+  EMIT_ASM (amd64_void_call_2_b,
+	    /* Restore the stack top, %rax may have been trashed.  */
+void
+amd64_emit_eq_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_eq,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jne .Lamd64_eq_fallthru\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax\n\t"
+	    /* jmp, but don't trust the assembler to choose the right jump */
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	    ".Lamd64_eq_fallthru:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax");
+
+  if (offset_p)
+    *offset_p = 13;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+amd64_emit_ne_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_ne,
+	    "cmp %rax,(%rsp)\n\t"
+	    "je .Lamd64_ne_fallthru\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax\n\t"
+	    /* jmp, but don't trust the assembler to choose the right jump */
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	    ".Lamd64_ne_fallthru:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax");
+
+  if (offset_p)
+    *offset_p = 13;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+amd64_emit_lt_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_lt,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jnl .Lamd64_lt_fallthru\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax\n\t"
+	    /* jmp, but don't trust the assembler to choose the right jump */
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	    ".Lamd64_lt_fallthru:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax");
+
+  if (offset_p)
+    *offset_p = 13;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+amd64_emit_le_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_le,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jnle .Lamd64_le_fallthru\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax\n\t"
+	    /* jmp, but don't trust the assembler to choose the right jump */
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	    ".Lamd64_le_fallthru:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax");
+
+  if (offset_p)
+    *offset_p = 13;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+amd64_emit_gt_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_gt,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jng .Lamd64_gt_fallthru\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax\n\t"
+	    /* jmp, but don't trust the assembler to choose the right jump */
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	    ".Lamd64_gt_fallthru:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax");
+
+  if (offset_p)
+    *offset_p = 13;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+amd64_emit_ge_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_ge,
+	    "cmp %rax,(%rsp)\n\t"
+	    "jnge .Lamd64_ge_fallthru\n\t"
+	    ".Lamd64_ge_jump:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax\n\t"
+	    /* jmp, but don't trust the assembler to choose the right jump */
+	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	    ".Lamd64_ge_fallthru:\n\t"
+	    "lea 0x8(%rsp),%rsp\n\t"
+	    "pop %rax");
+
+  if (offset_p)
+    *offset_p = 13;
+  if (size_p)
+    *size_p = 4;
+}
+
+struct emit_ops amd64_emit_ops =
+  {
+    amd64_emit_prologue,
+    amd64_emit_epilogue,
+    amd64_emit_add,
+    amd64_emit_sub,
+    amd64_emit_mul,
+    amd64_emit_lsh,
+    amd64_emit_rsh_signed,
+    amd64_emit_rsh_unsigned,
+    amd64_emit_ext,
+    amd64_emit_log_not,
+    amd64_emit_bit_and,
+    amd64_emit_bit_or,
+    amd64_emit_bit_xor,
+    amd64_emit_bit_not,
+    amd64_emit_equal,
+    amd64_emit_less_signed,
+    amd64_emit_less_unsigned,
+    amd64_emit_ref,
+    amd64_emit_if_goto,
+    amd64_emit_goto,
+    amd64_write_goto_address,
+    amd64_emit_const,
+    amd64_emit_call,
+    amd64_emit_reg,
+    amd64_emit_pop,
+    amd64_emit_stack_flush,
+    amd64_emit_zero_ext,
+    amd64_emit_swap,
+    amd64_emit_stack_adjust,
+    amd64_emit_int_call_1,
+    amd64_emit_void_call_2,
+    amd64_emit_eq_goto,
+    amd64_emit_ne_goto,
+    amd64_emit_lt_goto,
+    amd64_emit_le_goto,
+    amd64_emit_gt_goto,
+    amd64_emit_ge_goto
+  };
+
+#endif /* __x86_64__ */
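The 32-bit emitters below target the same stack machine, but a 64-bit value no longer fits in one register: the top of stack lives split across %eax (low half) and %ebx (high half), and deeper entries occupy two 4-byte words each, low word at the lower address. In plain C, that representation looks like this (split64/join64 are illustrative helpers, not gdbserver functions):

#include <stdint.h>
#include <stdio.h>

struct i386_top { uint32_t eax; uint32_t ebx; };	/* low, high */

static struct i386_top
split64 (int64_t v)
{
  struct i386_top t = { (uint32_t) v, (uint32_t) ((uint64_t) v >> 32) };
  return t;
}

static int64_t
join64 (struct i386_top t)
{
  return (int64_t) (((uint64_t) t.ebx << 32) | t.eax);
}

int
main (void)
{
  struct i386_top t = split64 (-42);
  printf ("%08x:%08x -> %lld\n", t.ebx, t.eax, (long long) join64 (t));
  return 0;
}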
+static void
+i386_emit_prologue (void)
+{
+  EMIT_ASM32 (i386_prologue,
+	      "push %ebp\n\t"
+	      "mov %esp,%ebp\n\t"
+	      "push %ebx");
+  /* At this point, the raw regs base address is at 8(%ebp), and the
+     value pointer is at 12(%ebp).  */
+}
+
+static void
+i386_emit_epilogue (void)
+{
+  EMIT_ASM32 (i386_epilogue,
+	      "mov 12(%ebp),%ecx\n\t"
+	      "mov %eax,(%ecx)\n\t"
+	      "mov %ebx,0x4(%ecx)\n\t"
+	      "xor %eax,%eax\n\t"
+	      "pop %ebx\n\t"
+	      "pop %ebp\n\t"
+	      "ret");
+}
+
+static void
+i386_emit_add (void)
+{
+  EMIT_ASM32 (i386_add,
+	      "add (%esp),%eax\n\t"
+	      "adc 0x4(%esp),%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_sub (void)
+{
+  EMIT_ASM32 (i386_sub,
+	      "subl %eax,(%esp)\n\t"
+	      "sbbl %ebx,4(%esp)\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t");
+}
+
+static void
+i386_emit_mul (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_lsh (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_rsh_signed (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_rsh_unsigned (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM32 (i386_ext_8,
+		  "cbtw\n\t"
+		  "cwtl\n\t"
+		  "movl %eax,%ebx\n\t"
+		  "sarl $31,%ebx");
+      break;
+    case 16:
+      EMIT_ASM32 (i386_ext_16,
+		  "cwtl\n\t"
+		  "movl %eax,%ebx\n\t"
+		  "sarl $31,%ebx");
+      break;
+    case 32:
+      EMIT_ASM32 (i386_ext_32,
+		  "movl %eax,%ebx\n\t"
+		  "sarl $31,%ebx");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+i386_emit_log_not (void)
+{
+  EMIT_ASM32 (i386_log_not,
+	      "or %ebx,%eax\n\t"
+	      "test %eax,%eax\n\t"
+	      "sete %cl\n\t"
+	      "xor %ebx,%ebx\n\t"
+	      "movzbl %cl,%eax");
+}
+
+static void
+i386_emit_bit_and (void)
+{
+  EMIT_ASM32 (i386_and,
+	      "and (%esp),%eax\n\t"
+	      "and 0x4(%esp),%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_or (void)
+{
+  EMIT_ASM32 (i386_or,
+	      "or (%esp),%eax\n\t"
+	      "or 0x4(%esp),%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_xor (void)
+{
+  EMIT_ASM32 (i386_xor,
+	      "xor (%esp),%eax\n\t"
+	      "xor 0x4(%esp),%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_not (void)
+{
+  EMIT_ASM32 (i386_bit_not,
+	      "xor $0xffffffff,%eax\n\t"
+	      "xor $0xffffffff,%ebx\n\t");
+}
+
+static void
+i386_emit_equal (void)
+{
+  EMIT_ASM32 (i386_equal,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "jne .Li386_equal_false\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      "je .Li386_equal_true\n\t"
+	      ".Li386_equal_false:\n\t"
+	      "xor %eax,%eax\n\t"
+	      "jmp .Li386_equal_end\n\t"
+	      ".Li386_equal_true:\n\t"
+	      "mov $1,%eax\n\t"
+	      ".Li386_equal_end:\n\t"
+	      "xor %ebx,%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_signed (void)
+{
+  EMIT_ASM32 (i386_less_signed,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "jl .Li386_less_signed_true\n\t"
+	      "jne .Li386_less_signed_false\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      /* With equal high halves, the low halves compare
+		 unsigned.  */
+	      "jb .Li386_less_signed_true\n\t"
+	      ".Li386_less_signed_false:\n\t"
+	      "xor %eax,%eax\n\t"
+	      "jmp .Li386_less_signed_end\n\t"
+	      ".Li386_less_signed_true:\n\t"
+	      "mov $1,%eax\n\t"
+	      ".Li386_less_signed_end:\n\t"
+	      "xor %ebx,%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_unsigned (void)
+{
+  EMIT_ASM32 (i386_less_unsigned,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "jb .Li386_less_unsigned_true\n\t"
+	      "jne .Li386_less_unsigned_false\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      "jb .Li386_less_unsigned_true\n\t"
+	      ".Li386_less_unsigned_false:\n\t"
+	      "xor %eax,%eax\n\t"
+	      "jmp .Li386_less_unsigned_end\n\t"
+	      ".Li386_less_unsigned_true:\n\t"
+	      "mov $1,%eax\n\t"
+	      ".Li386_less_unsigned_end:\n\t"
+	      "xor %ebx,%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
(%eax),%al"); + break; + case 2: + EMIT_ASM32 (i386_ref2, + "movw (%eax),%ax"); + break; + case 4: + EMIT_ASM32 (i386_ref4, + "movl (%eax),%eax"); + break; + case 8: + EMIT_ASM32 (i386_ref8, + "movl 4(%eax),%ebx\n\t" + "movl (%eax),%eax"); + break; + } +} + +static void +i386_emit_if_goto (int *offset_p, int *size_p) +{ + EMIT_ASM32 (i386_if_goto, + "mov %eax,%ecx\n\t" + "or %ebx,%ecx\n\t" + "pop %eax\n\t" + "pop %ebx\n\t" + "cmpl $0,%ecx\n\t" + /* Don't trust the assembler to choose the right jump */ + ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0"); + + if (offset_p) + *offset_p = 11; /* be sure that this matches the sequence above */ + if (size_p) + *size_p = 4; +} + +static void +i386_emit_goto (int *offset_p, int *size_p) +{ + EMIT_ASM32 (i386_goto, + /* Don't trust the assembler to choose the right jump */ + ".byte 0xe9, 0x0, 0x0, 0x0, 0x0"); + if (offset_p) + *offset_p = 1; + if (size_p) + *size_p = 4; +} + +static void +i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size) +{ + int diff = (to - (from + size)); + unsigned char buf[sizeof (int)]; + + /* We're only doing 4-byte sizes at the moment. */ + if (size != 4) + { + emit_error = 1; + return; + } + + memcpy (buf, &diff, sizeof (int)); + write_inferior_memory (from, buf, sizeof (int)); +} + +static void +i386_emit_const (LONGEST num) +{ + unsigned char buf[16]; + int i, hi, lo; + CORE_ADDR buildaddr = current_insn_ptr; + + i = 0; + buf[i++] = 0xb8; /* mov $,%eax */ + lo = num & 0xffffffff; + memcpy (&buf[i], &lo, sizeof (lo)); + i += 4; + hi = ((num >> 32) & 0xffffffff); + if (hi) + { + buf[i++] = 0xbb; /* mov $,%ebx */ + memcpy (&buf[i], &hi, sizeof (hi)); + i += 4; + } + else + { + buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */ + } + append_insns (&buildaddr, i, buf); + current_insn_ptr = buildaddr; +} + +static void +i386_emit_call (CORE_ADDR fn) +{ + unsigned char buf[16]; + int i, offset; + CORE_ADDR buildaddr; + + buildaddr = current_insn_ptr; + i = 0; + buf[i++] = 0xe8; /* call */ + offset = ((int) fn) - (buildaddr + 5); + memcpy (buf + 1, &offset, 4); + append_insns (&buildaddr, 5, buf); + current_insn_ptr = buildaddr; +} + +static void +i386_emit_reg (int reg) +{ + unsigned char buf[16]; + int i; + CORE_ADDR buildaddr; + + EMIT_ASM32 (i386_reg_a, + "sub $0x8,%esp"); + buildaddr = current_insn_ptr; + i = 0; + buf[i++] = 0xb8; /* mov $,%eax */ + memcpy (&buf[i], ®, sizeof (reg)); + i += 4; + append_insns (&buildaddr, i, buf); + current_insn_ptr = buildaddr; + EMIT_ASM32 (i386_reg_b, + "mov %eax,4(%esp)\n\t" + "mov 8(%ebp),%eax\n\t" + "mov %eax,(%esp)"); + i386_emit_call (get_raw_reg_func_addr ()); + EMIT_ASM32 (i386_reg_c, + "xor %ebx,%ebx\n\t" + "lea 0x8(%esp),%esp"); +} + +static void +i386_emit_pop (void) +{ + EMIT_ASM32 (i386_pop, + "pop %eax\n\t" + "pop %ebx"); +} + +static void +i386_emit_stack_flush (void) +{ + EMIT_ASM32 (i386_stack_flush, + "push %ebx\n\t" + "push %eax"); +} + +static void +i386_emit_zero_ext (int arg) +{ + switch (arg) + { + case 8: + EMIT_ASM32 (i386_zero_ext_8, + "and $0xff,%eax\n\t" + "xor %ebx,%ebx"); + break; + case 16: + EMIT_ASM32 (i386_zero_ext_16, + "and $0xffff,%eax\n\t" + "xor %ebx,%ebx"); + break; + case 32: + EMIT_ASM32 (i386_zero_ext_32, + "xor %ebx,%ebx"); + break; + default: + emit_error = 1; + } +} + +static void +i386_emit_swap (void) +{ + EMIT_ASM32 (i386_swap, + "mov %eax,%ecx\n\t" + "mov %ebx,%edx\n\t" + "pop %eax\n\t" + "pop %ebx\n\t" + "push %edx\n\t" + "push %ecx"); +} + +static void +i386_emit_stack_adjust (int n) +{ + unsigned char buf[16]; + int i; + 
+static void
+i386_emit_stack_adjust (int n)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
+  buf[i++] = 0x64;
+  buf[i++] = 0x24;
+  buf[i++] = n * 8;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'.  */
+
+static void
+i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  EMIT_ASM32 (i386_int_call_1_a,
+	      /* Reserve a bit of stack space.  */
+	      "sub $0x8,%esp");
+  /* Put the one argument on the stack.  */
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
+  buf[i++] = 0x04;
+  buf[i++] = 0x24;
+  memcpy (&buf[i], &arg1, sizeof (arg1));
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  i386_emit_call (fn);
+  EMIT_ASM32 (i386_int_call_1_c,
+	      "mov %edx,%ebx\n\t"
+	      "lea 0x8(%esp),%esp");
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'.  */
+
+static void
+i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  EMIT_ASM32 (i386_void_call_2_a,
+	      /* Preserve %eax only; we don't have to worry about %ebx.  */
+	      "push %eax\n\t"
+	      /* Reserve a bit of stack space for arguments.  */
+	      "sub $0x10,%esp\n\t"
+	      /* Copy "top" to the second argument position.  (Note that
+		 we can't assume the function won't scribble on its
+		 arguments, so don't try to restore from this.)  */
+	      "mov %eax,4(%esp)\n\t"
+	      "mov %ebx,8(%esp)");
+  /* Put the first argument on the stack.  */
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
+  buf[i++] = 0x04;
+  buf[i++] = 0x24;
+  memcpy (&buf[i], &arg1, sizeof (arg1));
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  i386_emit_call (fn);
+  EMIT_ASM32 (i386_void_call_2_b,
+	      "lea 0x10(%esp),%esp\n\t"
+	      /* Restore original stack top.  */
+	      "pop %eax");
+}
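The magic *offset_p values in this file (11, 18, 20) count the bytes emitted ahead of the rel32 field, including the 0xe9 opcode itself. A sketch of that arithmetic for the `eq' sequence below, assuming the short encodings gas picks for these operands:

#include <assert.h>

int
main (void)
{
  int cmpl_mem_reg = 3;	/* cmpl %eax,(%esp)   : 39 04 24 */
  int jcc_rel8     = 2;	/* jne .L...          : 75 xx */
  int cmpl_disp8   = 4;	/* cmpl %ebx,4(%esp)  : 39 5c 24 04 */
  int lea_disp8    = 4;	/* lea 0x8(%esp),%esp : 8d 64 24 08 */
  int pop_reg      = 1;	/* pop %eax / pop %ebx */
  int jmp_opcode   = 1;	/* 0xe9 */
  int offset = cmpl_mem_reg + jcc_rel8 + cmpl_disp8 + jcc_rel8
	       + lea_disp8 + 2 * pop_reg + jmp_opcode;

  assert (offset == 18);	/* matches *offset_p in i386_emit_eq_goto */
  return 0;
}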
+void
+i386_emit_eq_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (eq,
+	      /* Check low half first, more likely to be the decider */
+	      "cmpl %eax,(%esp)\n\t"
+	      "jne .Leq_fallthru\n\t"
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "jne .Leq_fallthru\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t"
+	      /* jmp, but don't trust the assembler to choose the right jump */
+	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	      ".Leq_fallthru:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx");
+
+  if (offset_p)
+    *offset_p = 18;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+i386_emit_ne_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (ne,
+	      /* Check low half first, more likely to be the decider */
+	      "cmpl %eax,(%esp)\n\t"
+	      "jne .Lne_jump\n\t"
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "je .Lne_fallthru\n\t"
+	      ".Lne_jump:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t"
+	      /* jmp, but don't trust the assembler to choose the right jump */
+	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	      ".Lne_fallthru:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx");
+
+  if (offset_p)
+    *offset_p = 18;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+i386_emit_lt_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (lt,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "jl .Llt_jump\n\t"
+	      "jne .Llt_fallthru\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      /* Equal high halves: the low halves compare unsigned.  */
+	      "jae .Llt_fallthru\n\t"
+	      ".Llt_jump:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t"
+	      /* jmp, but don't trust the assembler to choose the right jump */
+	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	      ".Llt_fallthru:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx");
+
+  if (offset_p)
+    *offset_p = 20;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+i386_emit_le_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (le,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      /* Strictly less only: equal high halves must fall
+		 through to the low-half test.  */
+	      "jl .Lle_jump\n\t"
+	      "jne .Lle_fallthru\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      /* Equal high halves: the low halves compare unsigned.  */
+	      "ja .Lle_fallthru\n\t"
+	      ".Lle_jump:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t"
+	      /* jmp, but don't trust the assembler to choose the right jump */
+	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	      ".Lle_fallthru:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx");
+
+  if (offset_p)
+    *offset_p = 20;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+i386_emit_gt_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (gt,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      "jg .Lgt_jump\n\t"
+	      "jne .Lgt_fallthru\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      /* Equal high halves: the low halves compare unsigned.  */
+	      "jbe .Lgt_fallthru\n\t"
+	      ".Lgt_jump:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t"
+	      /* jmp, but don't trust the assembler to choose the right jump */
+	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	      ".Lgt_fallthru:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx");
+
+  if (offset_p)
+    *offset_p = 20;
+  if (size_p)
+    *size_p = 4;
+}
+
+void
+i386_emit_ge_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (ge,
+	      "cmpl %ebx,4(%esp)\n\t"
+	      /* Strictly greater only: equal high halves must fall
+		 through to the low-half test.  */
+	      "jg .Lge_jump\n\t"
+	      "jne .Lge_fallthru\n\t"
+	      "cmpl %eax,(%esp)\n\t"
+	      /* Equal high halves: the low halves compare unsigned.  */
+	      "jb .Lge_fallthru\n\t"
+	      ".Lge_jump:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx\n\t"
+	      /* jmp, but don't trust the assembler to choose the right jump */
+	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+	      ".Lge_fallthru:\n\t"
+	      "lea 0x8(%esp),%esp\n\t"
+	      "pop %eax\n\t"
+	      "pop %ebx");
+
+  if (offset_p)
+    *offset_p = 20;
+  if (size_p)
+    *size_p = 4;
+}
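Each emitter above plugs into one slot of struct emit_ops; the agent-expression compiler walks the bytecode and calls one hook per opcode, so the field order of the table below is the whole contract. A toy model of that dispatch (toy_emit_ops and the opcode names are made up for illustration, not gdbserver's real interface):

#include <stdio.h>

enum toy_op { OP_ADD, OP_SWAP, OP_POP };

struct toy_emit_ops
{
  void (*emit_add) (void);
  void (*emit_swap) (void);
  void (*emit_pop) (void);
};

static void emit_add (void)  { printf ("add\n"); }
static void emit_swap (void) { printf ("swap\n"); }
static void emit_pop (void)  { printf ("pop\n"); }

static struct toy_emit_ops toy_ops = { emit_add, emit_swap, emit_pop };

int
main (void)
{
  enum toy_op prog[] = { OP_ADD, OP_SWAP, OP_POP };

  for (int i = 0; i < 3; i++)
    switch (prog[i])
      {
      case OP_ADD:  toy_ops.emit_add ();  break;
      case OP_SWAP: toy_ops.emit_swap (); break;
      case OP_POP:  toy_ops.emit_pop ();  break;
      }
  return 0;
}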
+struct emit_ops i386_emit_ops =
+  {
+    i386_emit_prologue,
+    i386_emit_epilogue,
+    i386_emit_add,
+    i386_emit_sub,
+    i386_emit_mul,
+    i386_emit_lsh,
+    i386_emit_rsh_signed,
+    i386_emit_rsh_unsigned,
+    i386_emit_ext,
+    i386_emit_log_not,
+    i386_emit_bit_and,
+    i386_emit_bit_or,
+    i386_emit_bit_xor,
+    i386_emit_bit_not,
+    i386_emit_equal,
+    i386_emit_less_signed,
+    i386_emit_less_unsigned,
+    i386_emit_ref,
+    i386_emit_if_goto,
+    i386_emit_goto,
+    i386_write_goto_address,
+    i386_emit_const,
+    i386_emit_call,
+    i386_emit_reg,
+    i386_emit_pop,
+    i386_emit_stack_flush,
+    i386_emit_zero_ext,
+    i386_emit_swap,
+    i386_emit_stack_adjust,
+    i386_emit_int_call_1,
+    i386_emit_void_call_2,
+    i386_emit_eq_goto,
+    i386_emit_ne_goto,
+    i386_emit_lt_goto,
+    i386_emit_le_goto,
+    i386_emit_gt_goto,
+    i386_emit_ge_goto
+  };
+
+
+static struct emit_ops *
+x86_emit_ops (void)
+{
+#ifdef __x86_64__
+  if (is_64bit_tdesc ())
+    return &amd64_emit_ops;
+  else
+#endif
+    return &i386_emit_ops;
+}
+
+/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */
+
+static const gdb_byte *
+x86_sw_breakpoint_from_kind (int kind, int *size)
+{
+  *size = x86_breakpoint_len;
+  return x86_breakpoint;
+}
+
+static int
+x86_supports_range_stepping (void)
+{
+  return 1;
+}
+
+/* Implementation of linux_target_ops method
+   "supports_hardware_single_step".  */
+
+static int
+x86_supports_hardware_single_step (void)
+{
+  return 1;
+}
+
+static int
+x86_get_ipa_tdesc_idx (void)
+{
+  struct regcache *regcache = get_thread_regcache (current_thread, 0);
+  const struct target_desc *tdesc = regcache->tdesc;
+
+#ifdef __x86_64__
+  return amd64_get_ipa_tdesc_idx (tdesc);
+#endif
+
+  if (tdesc == tdesc_i386_linux_no_xml)
+    return X86_TDESC_SSE;
+
+  return i386_get_ipa_tdesc_idx (tdesc);
+}
+
+/* This is initialized assuming an amd64 target.
+   x86_arch_setup will correct it for i386 or amd64 targets.  */
+
+struct linux_target_ops the_low_target =
 {
   x86_arch_setup,
-  -1,
-  NULL,
-  NULL,
-  NULL,
+  x86_linux_regs_info,
+  x86_cannot_fetch_register,
+  x86_cannot_store_register,
+  NULL, /* fetch_register */
   x86_get_pc,
   x86_set_pc,
-  x86_breakpoint,
-  x86_breakpoint_len,
+  NULL, /* breakpoint_kind_from_pc */
+  x86_sw_breakpoint_from_kind,
   NULL,
   1,
   x86_breakpoint_at,
+  x86_supports_z_point_type,
   x86_insert_point,
   x86_remove_point,
   x86_stopped_by_watchpoint,
@@ -852,5 +2880,41 @@ struct linux_target_ops the_low_target =
   x86_siginfo_fixup,
   x86_linux_new_process,
   x86_linux_new_thread,
-  x86_linux_prepare_to_resume
+  x86_linux_new_fork,
+  x86_linux_prepare_to_resume,
+  x86_linux_process_qsupported,
+  x86_supports_tracepoints,
+  x86_get_thread_area,
+  x86_install_fast_tracepoint_jump_pad,
+  x86_emit_ops,
+  x86_get_min_fast_tracepoint_insn_len,
+  x86_supports_range_stepping,
+  NULL, /* breakpoint_kind_from_current_state */
+  x86_supports_hardware_single_step,
+  x86_get_syscall_trapinfo,
+  x86_get_ipa_tdesc_idx,
 };
+
+void
+initialize_low_arch (void)
+{
+  /* Initialize the Linux target descriptions.  */
+#ifdef __x86_64__
+  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
+  copy_target_description (tdesc_amd64_linux_no_xml,
+			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
+							 false));
+  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
+#endif
+
+#if GDB_SELF_TEST
+  initialize_low_tdesc ();
+#endif
+
+  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
+  copy_target_description (tdesc_i386_linux_no_xml,
+			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
+  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
+
+  initialize_regsets_info (&x86_regsets_info);
+}