+static int
+x86_supports_tracepoints (void)
+{
+ return 1;
+}
+
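+/* Write the LEN bytes in BUF into the inferior's memory at *TO, and
+ advance *TO past the bytes just written. */
+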
+static void
+append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
+{
+ write_inferior_memory (*to, buf, len);
+ *to += len;
+}
+
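+/* Convert the hexadecimal byte string OP (e.g. "48 89 e6") into raw
+ bytes stored in BUF. Return the number of bytes written. */
+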
+static int
+push_opcode (unsigned char *buf, char *op)
+{
+ unsigned char *buf_org = buf;
+
+ while (1)
+ {
+ char *endptr;
+ unsigned long ul = strtoul (op, &endptr, 16);
+
+ if (endptr == op)
+ break;
+
+ *buf++ = ul;
+ op = endptr;
+ }
+
+ return buf - buf_org;
+}
+
+#ifdef __x86_64__
+
+/* Build a jump pad that saves registers and calls a collection
+ function. Stores in JJUMP_PAD_INSN a jump instruction to the jump
+ pad; the caller is responsible for writing it at the tracepoint
+ address. */
+
+static int
+amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+ CORE_ADDR collector,
+ CORE_ADDR lockaddr,
+ ULONGEST orig_size,
+ CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
+ unsigned char *jjump_pad_insn,
+ ULONGEST *jjump_pad_insn_size,
+ CORE_ADDR *adjusted_insn_addr,
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
+{
+ unsigned char buf[40];
+ int i, offset;
+ int64_t loffset;
+
+ CORE_ADDR buildaddr = *jump_entry;
+
+ /* Build the jump pad. */
+
+ /* First, do tracepoint data collection. Save registers. */
+ i = 0;
+ /* Need to ensure stack pointer saved first. */
+ buf[i++] = 0x54; /* push %rsp */
+ buf[i++] = 0x55; /* push %rbp */
+ buf[i++] = 0x57; /* push %rdi */
+ buf[i++] = 0x56; /* push %rsi */
+ buf[i++] = 0x52; /* push %rdx */
+ buf[i++] = 0x51; /* push %rcx */
+ buf[i++] = 0x53; /* push %rbx */
+ buf[i++] = 0x50; /* push %rax */
+ buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
+ buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
+ buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
+ buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
+ buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
+ buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
+ buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
+ buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
+ buf[i++] = 0x9c; /* pushfq */
+ buf[i++] = 0x48; /* mov $<tpaddr>,%rdi */
+ buf[i++] = 0xbf;
+ *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
+ i += sizeof (unsigned long);
+ buf[i++] = 0x57; /* push %rdi */
+ append_insns (&buildaddr, i, buf);
+
+ /* Stack space for the collecting_t object. */
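+ /* The object holds the tracepoint address and the current thread's
+ thread area pointer (read below from %fs:0x0), which the in-process
+ agent uses to tell which thread is collecting. */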
+ i = 0;
+ i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
+ i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
+ memcpy (buf + i, &tpoint, 8);
+ i += 8;
+ i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
+ i += push_opcode (&buf[i],
+ "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
+ i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
+ append_insns (&buildaddr, i, buf);
+
+ /* spin-lock. */
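+ /* Loop until the lock word at LOCKADDR is zero and we atomically
+ replace it with the address of our collecting_t object (%rcx holds
+ a copy of %rsp); the jne branches back to the xor above while
+ another thread holds the lock. */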
+ i = 0;
+ i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
+ memcpy (&buf[i], (void *) &lockaddr, 8);
+ i += 8;
+ i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
+ i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
+ i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
+ i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
+ i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
+ append_insns (&buildaddr, i, buf);
+
+ /* Set up the gdb_collect call. */
+ /* At this point, (stack pointer + 0x18) is the base of our saved
+ register block. */
+
+ i = 0;
+ i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
+ i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
+
+ /* The tracepoint address may be 64 bits wide. */
+ i += push_opcode (&buf[i], "48 bf"); /* mov $<tpoint>,%rdi */
+ memcpy (buf + i, &tpoint, 8);
+ i += 8;
+ append_insns (&buildaddr, i, buf);
+
+ /* The collector function, being in the shared library, may be more
+ than 2^31 bytes away from the jump pad, so call it through a
+ register. */
+ i = 0;
+ i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
+ memcpy (buf + i, &collector, 8);
+ i += 8;
+ i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
+ append_insns (&buildaddr, i, buf);
+
+ /* Clear the spin-lock. */
+ i = 0;
+ i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
+ i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
+ memcpy (buf + i, &lockaddr, 8);
+ i += 8;
+ append_insns (&buildaddr, i, buf);
+
+ /* Remove the stack space used for the collecting_t object. */
+ i = 0;
+ i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
+ append_insns (&buildaddr, i, buf);
+
+ /* Restore register state. */
+ i = 0;
+ buf[i++] = 0x48; /* add $0x8,%rsp */
+ buf[i++] = 0x83;
+ buf[i++] = 0xc4;
+ buf[i++] = 0x08;
+ buf[i++] = 0x9d; /* popfq */
+ buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
+ buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
+ buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
+ buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
+ buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
+ buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
+ buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
+ buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
+ buf[i++] = 0x58; /* pop %rax */
+ buf[i++] = 0x5b; /* pop %rbx */
+ buf[i++] = 0x59; /* pop %rcx */
+ buf[i++] = 0x5a; /* pop %rdx */
+ buf[i++] = 0x5e; /* pop %rsi */
+ buf[i++] = 0x5f; /* pop %rdi */
+ buf[i++] = 0x5d; /* pop %rbp */
+ buf[i++] = 0x5c; /* pop %rsp */
+ append_insns (&buildaddr, i, buf);
+
+ /* Now, adjust the original instruction to execute in the jump
+ pad. */
+ *adjusted_insn_addr = buildaddr;
+ relocate_instruction (&buildaddr, tpaddr);
+ *adjusted_insn_addr_end = buildaddr;
+
+ /* Finally, write a jump back to the program. */
+
+ loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
+ if (loffset > INT_MAX || loffset < INT_MIN)
+ {
+ sprintf (err,
+ "E.Jump back from jump pad too far from tracepoint "
+ "(offset 0x%" PRIx64 " > int32).", loffset);
+ return 1;
+ }
+
+ offset = (int) loffset;
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ append_insns (&buildaddr, sizeof (jump_insn), buf);
+
+ /* The jump pad is now built. Wire in a jump to our jump pad. This
+ is always done last (by our caller actually), so that we can
+ install fast tracepoints with threads running. This relies on
+ the agent's atomic write support. */
+ loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
+ if (loffset > INT_MAX || loffset < INT_MIN)
+ {
+ sprintf (err,
+ "E.Jump pad too far from tracepoint "
+ "(offset 0x%" PRIx64 " > int32).", loffset);
+ return 1;
+ }
+
+ offset = (int) loffset;
+
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
+ *jjump_pad_insn_size = sizeof (jump_insn);
+
+ /* Return the end address of our pad. */
+ *jump_entry = buildaddr;
+
+ return 0;
+}
+
+#endif /* __x86_64__ */
+
+/* Build a jump pad that saves registers and calls a collection
+ function. Stores in JJUMP_PAD_INSN a jump instruction to the jump
+ pad; the caller is responsible for writing it at the tracepoint
+ address. */
+
+static int
+i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+ CORE_ADDR collector,
+ CORE_ADDR lockaddr,
+ ULONGEST orig_size,
+ CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
+ unsigned char *jjump_pad_insn,
+ ULONGEST *jjump_pad_insn_size,
+ CORE_ADDR *adjusted_insn_addr,
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
+{
+ unsigned char buf[0x100];
+ int i, offset;
+ CORE_ADDR buildaddr = *jump_entry;
+
+ /* Build the jump pad. */
+
+ /* First, do tracepoint data collection. Save registers. */
+ i = 0;
+ buf[i++] = 0x60; /* pushad */
+ buf[i++] = 0x68; /* push tpaddr aka $pc */
+ *((int *)(buf + i)) = (int) tpaddr;
+ i += 4;
+ buf[i++] = 0x9c; /* pushf */
+ buf[i++] = 0x1e; /* push %ds */
+ buf[i++] = 0x06; /* push %es */
+ buf[i++] = 0x0f; /* push %fs */
+ buf[i++] = 0xa0;
+ buf[i++] = 0x0f; /* push %gs */
+ buf[i++] = 0xa8;
+ buf[i++] = 0x16; /* push %ss */
+ buf[i++] = 0x0e; /* push %cs */
+ append_insns (&buildaddr, i, buf);
+
+ /* Stack space for the collecting_t object. */
+ i = 0;
+ i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
+
+ /* Build the object. */
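+ /* As on amd64, the object holds the tracepoint address and the
+ current thread's thread area pointer, here read from %gs:0x0. */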
+ i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
+ memcpy (buf + i, &tpoint, 4);
+ i += 4;
+ i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
+
+ i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
+ i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
+ append_insns (&buildaddr, i, buf);
+
+ /* Spin-lock. Note this uses cmpxchg, which is not available on the
+ original i386. If we cared about that, we could use xchg instead. */
+
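+ /* Loop until the lock word is zero and we atomically replace it
+ with the address of our collecting_t object (%esp). */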
+ i = 0;
+ i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
+ i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
+ %esp,<lockaddr> */
+ memcpy (&buf[i], (void *) &lockaddr, 4);
+ i += 4;
+ i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
+ i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
+ append_insns (&buildaddr, i, buf);
+
+
+ /* Set up arguments to the gdb_collect call. */
+ i = 0;
+ i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
+ i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
+ i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
+ append_insns (&buildaddr, i, buf);
+
+ i = 0;
+ i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
+ append_insns (&buildaddr, i, buf);
+
+ i = 0;
+ i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
+ memcpy (&buf[i], (void *) &tpoint, 4);
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+
+ buf[0] = 0xe8; /* call <reladdr> */
+ offset = collector - (buildaddr + sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ append_insns (&buildaddr, 5, buf);
+ /* Clean up after the call. */
+ buf[0] = 0x83; /* add $0x8,%esp */
+ buf[1] = 0xc4;
+ buf[2] = 0x08;
+ append_insns (&buildaddr, 3, buf);
+
+
+ /* Clear the spin-lock. This would need the LOCK prefix on older
+ broken archs. */
+ i = 0;
+ i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
+ i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
+ memcpy (buf + i, &lockaddr, 4);
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+
+
+ /* Remove the stack space used for the collecting_t object. */
+ i = 0;
+ i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
+ append_insns (&buildaddr, i, buf);
+
+ i = 0;
+ buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
+ buf[i++] = 0xc4;
+ buf[i++] = 0x04;
+ buf[i++] = 0x17; /* pop %ss */
+ buf[i++] = 0x0f; /* pop %gs */
+ buf[i++] = 0xa9;
+ buf[i++] = 0x0f; /* pop %fs */
+ buf[i++] = 0xa1;
+ buf[i++] = 0x07; /* pop %es */
+ buf[i++] = 0x1f; /* pop %ds */
+ buf[i++] = 0x9d; /* popf */
+ buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
+ buf[i++] = 0xc4;
+ buf[i++] = 0x04;
+ buf[i++] = 0x61; /* popad */
+ append_insns (&buildaddr, i, buf);
+
+ /* Now, adjust the original instruction to execute in the jump
+ pad. */
+ *adjusted_insn_addr = buildaddr;
+ relocate_instruction (&buildaddr, tpaddr);
+ *adjusted_insn_addr_end = buildaddr;
+
+ /* Write the jump back to the program. */
+ offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ append_insns (&buildaddr, sizeof (jump_insn), buf);
+
+ /* The jump pad is now built. Wire in a jump to our jump pad. This
+ is always done last (by our caller actually), so that we can
+ install fast tracepoints with threads running. This relies on
+ the agent's atomic write support. */
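+ /* A 5-byte jump does not fit in the slot of a 4-byte instruction,
+ so in that case jump to a trampoline with a 4-byte instruction
+ using a 2-byte offset, and let the trampoline jump on to the
+ jump pad. */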
+ if (orig_size == 4)
+ {
+ /* Create a trampoline. */
+ *trampoline_size = sizeof (jump_insn);
+ if (!claim_trampoline_space (*trampoline_size, trampoline))
+ {
+ /* No trampoline space available. */
+ strcpy (err,
+ "E.Cannot allocate trampoline space needed for fast "
+ "tracepoints on 4-byte instructions.");
+ return 1;
+ }
+
+ offset = *jump_entry - (*trampoline + sizeof (jump_insn));
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
+
+ /* Use a 16-bit relative jump instruction to jump to the trampoline. */
+ offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
+ memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
+ memcpy (buf + 2, &offset, 2);
+ memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
+ *jjump_pad_insn_size = sizeof (small_jump_insn);
+ }
+ else
+ {
+ /* Else use a 32-bit relative jump instruction. */
+ offset = *jump_entry - (tpaddr + sizeof (jump_insn));
+ memcpy (buf, jump_insn, sizeof (jump_insn));
+ memcpy (buf + 1, &offset, 4);
+ memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
+ *jjump_pad_insn_size = sizeof (jump_insn);
+ }
+
+ /* Return the end address of our pad. */
+ *jump_entry = buildaddr;
+
+ return 0;
+}
+
+static int
+x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
+ CORE_ADDR collector,
+ CORE_ADDR lockaddr,
+ ULONGEST orig_size,
+ CORE_ADDR *jump_entry,
+ CORE_ADDR *trampoline,
+ ULONGEST *trampoline_size,
+ unsigned char *jjump_pad_insn,
+ ULONGEST *jjump_pad_insn_size,
+ CORE_ADDR *adjusted_insn_addr,
+ CORE_ADDR *adjusted_insn_addr_end,
+ char *err)
+{
+#ifdef __x86_64__
+ if (register_size (0) == 8)
+ return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
+ collector, lockaddr,
+ orig_size, jump_entry,
+ trampoline, trampoline_size,
+ jjump_pad_insn,
+ jjump_pad_insn_size,
+ adjusted_insn_addr,
+ adjusted_insn_addr_end,
+ err);
+#endif
+
+ return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
+ collector, lockaddr,
+ orig_size, jump_entry,
+ trampoline, trampoline_size,
+ jjump_pad_insn,
+ jjump_pad_insn_size,
+ adjusted_insn_addr,
+ adjusted_insn_addr_end,
+ err);
+}
+
+/* Return the minimum instruction length for fast tracepoints on x86/x86-64
+ architectures. */
+
+static int
+x86_get_min_fast_tracepoint_insn_len (void)
+{
+ static int warned_about_fast_tracepoints = 0;
+
+#ifdef __x86_64__
+ /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
+ used for fast tracepoints. */
+ if (register_size (0) == 8)
+ return 5;
+#endif
+
+ if (agent_loaded_p ())
+ {
+ char errbuf[IPA_BUFSIZ];
+
+ errbuf[0] = '\0';
+
+ /* On x86, if trampolines are available, then 4-byte jump instructions
+ with a 2-byte offset may be used, otherwise 5-byte jump instructions
+ with a 4-byte offset are used instead. */
+ if (have_fast_tracepoint_trampoline_buffer (errbuf))
+ return 4;
+ else
+ {
+ /* GDB has no channel to explain to the user why a shorter fast
+ tracepoint is not possible, but at least make GDBserver
+ mention that something has gone awry. */
+ if (!warned_about_fast_tracepoints)
+ {
+ warning ("4-byte fast tracepoints not available; %s\n", errbuf);
+ warned_about_fast_tracepoints = 1;
+ }
+ return 5;
+ }
+ }
+ else
+ {
+ /* Indicate that the minimum length is currently unknown since the IPA
+ has not loaded yet. */
+ return 0;
+ }
+}
+
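+/* Append the LEN bytes at START to the code being built at
+ current_insn_ptr, and advance current_insn_ptr past them. */
+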
+static void
+add_insns (unsigned char *start, int len)
+{
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ if (debug_threads)
+ fprintf (stderr, "Adding %d bytes of insn at %s\n",
+ len, paddress (buildaddr));
+
+ append_insns (&buildaddr, len, start);
+ current_insn_ptr = buildaddr;
+}
+
+/* Our general strategy for emitting code is to avoid specifying raw
+ bytes whenever possible, and instead copy a block of inline asm
+ that is embedded in the function. This is a little messy, because
+ we need to keep the compiler from discarding what looks like dead
+ code, plus suppress various warnings. */
+
+#define EMIT_ASM(NAME, INSNS) \
+ do \
+ { \
+ extern unsigned char start_ ## NAME, end_ ## NAME; \
+ add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
+ __asm__ ("jmp end_" #NAME "\n" \
+ "\t" "start_" #NAME ":" \
+ "\t" INSNS "\n" \
+ "\t" "end_" #NAME ":"); \
+ } while (0)
+
+#ifdef __x86_64__
+
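+/* A 64-bit gdbserver may be compiling agent expressions for a 32-bit
+ inferior; bracket the inline asm with .code32/.code64 so the
+ assembler encodes it as 32-bit code. */
+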
+#define EMIT_ASM32(NAME,INSNS) \
+ do \
+ { \
+ extern unsigned char start_ ## NAME, end_ ## NAME; \
+ add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
+ __asm__ (".code32\n" \
+ "\t" "jmp end_" #NAME "\n" \
+ "\t" "start_" #NAME ":\n" \
+ "\t" INSNS "\n" \
+ "\t" "end_" #NAME ":\n" \
+ ".code64\n"); \
+ } while (0)
+
+#else
+
+#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
+
+#endif
+
+#ifdef __x86_64__
+
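+/* Conventions assumed by the code emitted below: the raw register
+ block pointer arrives in %rdi and the result-value pointer in %rsi
+ (the prologue saves them at -8(%rbp) and -16(%rbp)), and the top of
+ the agent expression stack is kept in %rax. */
+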
+static void
+amd64_emit_prologue (void)
+{
+ EMIT_ASM (amd64_prologue,
+ "pushq %rbp\n\t"
+ "movq %rsp,%rbp\n\t"
+ "sub $0x20,%rsp\n\t"
+ "movq %rdi,-8(%rbp)\n\t"
+ "movq %rsi,-16(%rbp)");
+}
+
+
+static void
+amd64_emit_epilogue (void)
+{
+ EMIT_ASM (amd64_epilogue,
+ "movq -16(%rbp),%rdi\n\t"
+ "movq %rax,(%rdi)\n\t"
+ "xor %rax,%rax\n\t"
+ "leave\n\t"
+ "ret");
+}
+
+static void
+amd64_emit_add (void)
+{
+ EMIT_ASM (amd64_add,
+ "add (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_sub (void)
+{
+ EMIT_ASM (amd64_sub,
+ "sub %rax,(%rsp)\n\t"
+ "pop %rax");
+}
+
+static void
+amd64_emit_mul (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_lsh (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_signed (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_unsigned (void)
+{
+ emit_error = 1;
+}
+
+static void
+amd64_emit_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM (amd64_ext_8,
+ "cbtw\n\t"
+ "cwtl\n\t"
+ "cltq");
+ break;
+ case 16:
+ EMIT_ASM (amd64_ext_16,
+ "cwtl\n\t"
+ "cltq");
+ break;
+ case 32:
+ EMIT_ASM (amd64_ext_32,
+ "cltq");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+amd64_emit_log_not (void)
+{
+ EMIT_ASM (amd64_log_not,
+ "test %rax,%rax\n\t"
+ "sete %cl\n\t"
+ "movzbq %cl,%rax");
+}
+
+static void
+amd64_emit_bit_and (void)
+{
+ EMIT_ASM (amd64_and,
+ "and (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_or (void)
+{
+ EMIT_ASM (amd64_or,
+ "or (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_xor (void)
+{
+ EMIT_ASM (amd64_xor,
+ "xor (%rsp),%rax\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_not (void)
+{
+ EMIT_ASM (amd64_bit_not,
+ "xorq $0xffffffffffffffff,%rax");
+}
+
+static void
+amd64_emit_equal (void)
+{
+ EMIT_ASM (amd64_equal,
+ "cmp %rax,(%rsp)\n\t"
+ "je .Lamd64_equal_true\n\t"
+ "xor %rax,%rax\n\t"
+ "jmp .Lamd64_equal_end\n\t"
+ ".Lamd64_equal_true:\n\t"
+ "mov $0x1,%rax\n\t"
+ ".Lamd64_equal_end:\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_signed (void)
+{
+ EMIT_ASM (amd64_less_signed,
+ "cmp %rax,(%rsp)\n\t"
+ "jl .Lamd64_less_signed_true\n\t"
+ "xor %rax,%rax\n\t"
+ "jmp .Lamd64_less_signed_end\n\t"
+ ".Lamd64_less_signed_true:\n\t"
+ "mov $1,%rax\n\t"
+ ".Lamd64_less_signed_end:\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_unsigned (void)
+{
+ EMIT_ASM (amd64_less_unsigned,
+ "cmp %rax,(%rsp)\n\t"
+ "jb .Lamd64_less_unsigned_true\n\t"
+ "xor %rax,%rax\n\t"
+ "jmp .Lamd64_less_unsigned_end\n\t"
+ ".Lamd64_less_unsigned_true:\n\t"
+ "mov $1,%rax\n\t"
+ ".Lamd64_less_unsigned_end:\n\t"
+ "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_ref (int size)
+{
+ switch (size)
+ {
+ case 1:
+ EMIT_ASM (amd64_ref1,
+ "movb (%rax),%al");
+ break;
+ case 2:
+ EMIT_ASM (amd64_ref2,
+ "movw (%rax),%ax");
+ break;
+ case 4:
+ EMIT_ASM (amd64_ref4,
+ "movl (%rax),%eax");
+ break;
+ case 8:
+ EMIT_ASM (amd64_ref8,
+ "movq (%rax),%rax");
+ break;
+ }
+}
+
+static void
+amd64_emit_if_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_if_goto,
+ "mov %rax,%rcx\n\t"
+ "pop %rax\n\t"
+ "cmp $0,%rcx\n\t"
+ ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+ if (offset_p)
+ *offset_p = 10;
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+amd64_emit_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_goto,
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+ if (offset_p)
+ *offset_p = 1;
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+ int diff = (to - (from + size));
+ unsigned char buf[sizeof (int)];
+
+ if (size != 4)
+ {
+ emit_error = 1;
+ return;
+ }
+
+ memcpy (buf, &diff, sizeof (int));
+ write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+amd64_emit_const (LONGEST num)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
+ memcpy (&buf[i], &num, sizeof (num));
+ i += 8;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_call (CORE_ADDR fn)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+ LONGEST offset64;
+
+ /* The destination function, being in the shared library, may be
+ more than 2^31 bytes away from the compiled code pad. */
+
+ buildaddr = current_insn_ptr;
+
+ offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
+
+ i = 0;
+
+ if (offset64 > INT_MAX || offset64 < INT_MIN)
+ {
+ /* Offset is too large for a direct call. Call through a register
+ instead; %rdx is call-clobbered and is not used to pass arguments
+ by any of the calls emitted here, so we don't have to push/pop
+ it. */
+ buf[i++] = 0x48; /* mov $fn,%rdx */
+ buf[i++] = 0xba;
+ memcpy (buf + i, &fn, 8);
+ i += 8;
+ buf[i++] = 0xff; /* callq *%rdx */
+ buf[i++] = 0xd2;
+ }
+ else
+ {
+ int offset32 = offset64; /* we know we can't overflow here. */
+
+ buf[i++] = 0xe8; /* call <reladdr> */
+ memcpy (buf + i, &offset32, 4);
+ i += 4;
+ }
+
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_reg (int reg)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ /* Assume raw_regs is still in %rdi. */
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xbe; /* mov $<n>,%esi */
+ memcpy (&buf[i], &reg, sizeof (reg));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ amd64_emit_call (get_raw_reg_func_addr ());
+}
+
+static void
+amd64_emit_pop (void)
+{
+ EMIT_ASM (amd64_pop,
+ "pop %rax");
+}
+
+static void
+amd64_emit_stack_flush (void)
+{
+ EMIT_ASM (amd64_stack_flush,
+ "push %rax");
+}
+
+static void
+amd64_emit_zero_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM (amd64_zero_ext_8,
+ "and $0xff,%rax");
+ break;
+ case 16:
+ EMIT_ASM (amd64_zero_ext_16,
+ "and $0xffff,%rax");
+ break;
+ case 32:
+ EMIT_ASM (amd64_zero_ext_32,
+ "mov $0xffffffff,%rcx\n\t"
+ "and %rcx,%rax");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+amd64_emit_swap (void)
+{
+ EMIT_ASM (amd64_swap,
+ "mov %rax,%rcx\n\t"
+ "pop %rax\n\t"
+ "push %rcx");
+}
+
+static void
+amd64_emit_stack_adjust (int n)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
+ buf[i++] = 0x8d;
+ buf[i++] = 0x64;
+ buf[i++] = 0x24;
+ /* This only handles adjustments up to 16, but we don't expect any more. */
+ buf[i++] = n * 8;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'. */
+
+static void
+amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xbf; /* movl $<n>,%edi */
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ amd64_emit_call (fn);
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'. */
+
+static void
+amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xbf; /* movl $<n>,%edi */
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ EMIT_ASM (amd64_void_call_2_a,
+ /* Save away a copy of the stack top. */
+ "push %rax\n\t"
+ /* Also pass top as the second argument. */
+ "mov %rax,%rsi");
+ amd64_emit_call (fn);
+ EMIT_ASM (amd64_void_call_2_b,
+ /* Restore the stack top, %rax may have been trashed. */
+ "pop %rax");
+}
+
+void
+amd64_emit_eq_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_eq,
+ "cmp %rax,(%rsp)\n\t"
+ "jne .Lamd64_eq_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_eq_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_ne_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_ne,
+ "cmp %rax,(%rsp)\n\t"
+ "je .Lamd64_ne_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_ne_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_lt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_lt,
+ "cmp %rax,(%rsp)\n\t"
+ "jnl .Lamd64_lt_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_lt_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_le_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_le,
+ "cmp %rax,(%rsp)\n\t"
+ "jnle .Lamd64_le_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_le_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_gt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_gt,
+ "cmp %rax,(%rsp)\n\t"
+ "jng .Lamd64_gt_fallthru\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_gt_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+amd64_emit_ge_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM (amd64_ge,
+ "cmp %rax,(%rsp)\n\t"
+ "jnge .Lamd64_ge_fallthru\n\t"
+ ".Lamd64_ge_jump:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lamd64_ge_fallthru:\n\t"
+ "lea 0x8(%rsp),%rsp\n\t"
+ "pop %rax");
+
+ if (offset_p)
+ *offset_p = 13;
+ if (size_p)
+ *size_p = 4;
+}
+
+struct emit_ops amd64_emit_ops =
+ {
+ amd64_emit_prologue,
+ amd64_emit_epilogue,
+ amd64_emit_add,
+ amd64_emit_sub,
+ amd64_emit_mul,
+ amd64_emit_lsh,
+ amd64_emit_rsh_signed,
+ amd64_emit_rsh_unsigned,
+ amd64_emit_ext,
+ amd64_emit_log_not,
+ amd64_emit_bit_and,
+ amd64_emit_bit_or,
+ amd64_emit_bit_xor,
+ amd64_emit_bit_not,
+ amd64_emit_equal,
+ amd64_emit_less_signed,
+ amd64_emit_less_unsigned,
+ amd64_emit_ref,
+ amd64_emit_if_goto,
+ amd64_emit_goto,
+ amd64_write_goto_address,
+ amd64_emit_const,
+ amd64_emit_call,
+ amd64_emit_reg,
+ amd64_emit_pop,
+ amd64_emit_stack_flush,
+ amd64_emit_zero_ext,
+ amd64_emit_swap,
+ amd64_emit_stack_adjust,
+ amd64_emit_int_call_1,
+ amd64_emit_void_call_2,
+ amd64_emit_eq_goto,
+ amd64_emit_ne_goto,
+ amd64_emit_lt_goto,
+ amd64_emit_le_goto,
+ amd64_emit_gt_goto,
+ amd64_emit_ge_goto
+ };
+
+#endif /* __x86_64__ */
+
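+/* The 32-bit emitters below operate on 64-bit values: the top of the
+ agent expression stack is kept in %ebx:%eax (high:low), and the
+ remaining entries live on the machine stack as 8-byte pairs. */
+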
+static void
+i386_emit_prologue (void)
+{
+ EMIT_ASM32 (i386_prologue,
+ "push %ebp\n\t"
+ "mov %esp,%ebp\n\t"
+ "push %ebx");
+ /* At this point, the raw regs base address is at 8(%ebp), and the
+ value pointer is at 12(%ebp). */
+}
+
+static void
+i386_emit_epilogue (void)
+{
+ EMIT_ASM32 (i386_epilogue,
+ "mov 12(%ebp),%ecx\n\t"
+ "mov %eax,(%ecx)\n\t"
+ "mov %ebx,0x4(%ecx)\n\t"
+ "xor %eax,%eax\n\t"
+ "pop %ebx\n\t"
+ "pop %ebp\n\t"
+ "ret");
+}
+
+static void
+i386_emit_add (void)
+{
+ EMIT_ASM32 (i386_add,
+ "add (%esp),%eax\n\t"
+ "adc 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_sub (void)
+{
+ EMIT_ASM32 (i386_sub,
+ "subl %eax,(%esp)\n\t"
+ "sbbl %ebx,4(%esp)\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t");
+}
+
+static void
+i386_emit_mul (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_lsh (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_rsh_signed (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_rsh_unsigned (void)
+{
+ emit_error = 1;
+}
+
+static void
+i386_emit_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM32 (i386_ext_8,
+ "cbtw\n\t"
+ "cwtl\n\t"
+ "movl %eax,%ebx\n\t"
+ "sarl $31,%ebx");
+ break;
+ case 16:
+ EMIT_ASM32 (i386_ext_16,
+ "cwtl\n\t"
+ "movl %eax,%ebx\n\t"
+ "sarl $31,%ebx");
+ break;
+ case 32:
+ EMIT_ASM32 (i386_ext_32,
+ "movl %eax,%ebx\n\t"
+ "sarl $31,%ebx");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+i386_emit_log_not (void)
+{
+ EMIT_ASM32 (i386_log_not,
+ "or %ebx,%eax\n\t"
+ "test %eax,%eax\n\t"
+ "sete %cl\n\t"
+ "xor %ebx,%ebx\n\t"
+ "movzbl %cl,%eax");
+}
+
+static void
+i386_emit_bit_and (void)
+{
+ EMIT_ASM32 (i386_and,
+ "and (%esp),%eax\n\t"
+ "and 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_or (void)
+{
+ EMIT_ASM32 (i386_or,
+ "or (%esp),%eax\n\t"
+ "or 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_xor (void)
+{
+ EMIT_ASM32 (i386_xor,
+ "xor (%esp),%eax\n\t"
+ "xor 0x4(%esp),%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_not (void)
+{
+ EMIT_ASM32 (i386_bit_not,
+ "xor $0xffffffff,%eax\n\t"
+ "xor $0xffffffff,%ebx\n\t");
+}
+
+static void
+i386_emit_equal (void)
+{
+ EMIT_ASM32 (i386_equal,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jne .Li386_equal_false\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "je .Li386_equal_true\n\t"
+ ".Li386_equal_false:\n\t"
+ "xor %eax,%eax\n\t"
+ "jmp .Li386_equal_end\n\t"
+ ".Li386_equal_true:\n\t"
+ "mov $1,%eax\n\t"
+ ".Li386_equal_end:\n\t"
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_signed (void)
+{
+ EMIT_ASM32 (i386_less_signed,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jl .Li386_less_signed_true\n\t"
+ "jne .Li386_less_signed_false\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jl .Li386_less_signed_true\n\t"
+ ".Li386_less_signed_false:\n\t"
+ "xor %eax,%eax\n\t"
+ "jmp .Li386_less_signed_end\n\t"
+ ".Li386_less_signed_true:\n\t"
+ "mov $1,%eax\n\t"
+ ".Li386_less_signed_end:\n\t"
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_unsigned (void)
+{
+ EMIT_ASM32 (i386_less_unsigned,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jb .Li386_less_unsigned_true\n\t"
+ "jne .Li386_less_unsigned_false\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jb .Li386_less_unsigned_true\n\t"
+ ".Li386_less_unsigned_false:\n\t"
+ "xor %eax,%eax\n\t"
+ "jmp .Li386_less_unsigned_end\n\t"
+ ".Li386_less_unsigned_true:\n\t"
+ "mov $1,%eax\n\t"
+ ".Li386_less_unsigned_end:\n\t"
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_ref (int size)
+{
+ switch (size)
+ {
+ case 1:
+ EMIT_ASM32 (i386_ref1,
+ "movb (%eax),%al");
+ break;
+ case 2:
+ EMIT_ASM32 (i386_ref2,
+ "movw (%eax),%ax");
+ break;
+ case 4:
+ EMIT_ASM32 (i386_ref4,
+ "movl (%eax),%eax");
+ break;
+ case 8:
+ EMIT_ASM32 (i386_ref8,
+ "movl 4(%eax),%ebx\n\t"
+ "movl (%eax),%eax");
+ break;
+ }
+}
+
+static void
+i386_emit_if_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (i386_if_goto,
+ "mov %eax,%ecx\n\t"
+ "or %ebx,%ecx\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ "cmpl $0,%ecx\n\t"
+ /* Don't trust the assembler to choose the right jump */
+ ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+
+ if (offset_p)
+ *offset_p = 11; /* be sure that this matches the sequence above */
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+i386_emit_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (i386_goto,
+ /* Don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+ if (offset_p)
+ *offset_p = 1;
+ if (size_p)
+ *size_p = 4;
+}
+
+static void
+i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+ int diff = (to - (from + size));
+ unsigned char buf[sizeof (int)];
+
+ /* We're only doing 4-byte sizes at the moment. */
+ if (size != 4)
+ {
+ emit_error = 1;
+ return;
+ }
+
+ memcpy (buf, &diff, sizeof (int));
+ write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+i386_emit_const (LONGEST num)
+{
+ unsigned char buf[16];
+ int i, hi, lo;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0xb8; /* mov $<n>,%eax */
+ lo = num & 0xffffffff;
+ memcpy (&buf[i], &lo, sizeof (lo));
+ i += 4;
+ hi = ((num >> 32) & 0xffffffff);
+ if (hi)
+ {
+ buf[i++] = 0xbb; /* mov $<n>,%ebx */
+ memcpy (&buf[i], &hi, sizeof (hi));
+ i += 4;
+ }
+ else
+ {
+ buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
+ }
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+i386_emit_call (CORE_ADDR fn)
+{
+ unsigned char buf[16];
+ int i, offset;
+ CORE_ADDR buildaddr;
+
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xe8; /* call <reladdr> */
+ offset = ((int) fn) - (buildaddr + 5);
+ memcpy (buf + 1, &offset, 4);
+ append_insns (&buildaddr, 5, buf);
+ current_insn_ptr = buildaddr;
+}
+
+static void
+i386_emit_reg (int reg)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ EMIT_ASM32 (i386_reg_a,
+ "sub $0x8,%esp");
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xb8; /* mov $<n>,%eax */
+ memcpy (&buf[i], &reg, sizeof (reg));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ EMIT_ASM32 (i386_reg_b,
+ "mov %eax,4(%esp)\n\t"
+ "mov 8(%ebp),%eax\n\t"
+ "mov %eax,(%esp)");
+ i386_emit_call (get_raw_reg_func_addr ());
+ EMIT_ASM32 (i386_reg_c,
+ "xor %ebx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_pop (void)
+{
+ EMIT_ASM32 (i386_pop,
+ "pop %eax\n\t"
+ "pop %ebx");
+}
+
+static void
+i386_emit_stack_flush (void)
+{
+ EMIT_ASM32 (i386_stack_flush,
+ "push %ebx\n\t"
+ "push %eax");
+}
+
+static void
+i386_emit_zero_ext (int arg)
+{
+ switch (arg)
+ {
+ case 8:
+ EMIT_ASM32 (i386_zero_ext_8,
+ "and $0xff,%eax\n\t"
+ "xor %ebx,%ebx");
+ break;
+ case 16:
+ EMIT_ASM32 (i386_zero_ext_16,
+ "and $0xffff,%eax\n\t"
+ "xor %ebx,%ebx");
+ break;
+ case 32:
+ EMIT_ASM32 (i386_zero_ext_32,
+ "xor %ebx,%ebx");
+ break;
+ default:
+ emit_error = 1;
+ }
+}
+
+static void
+i386_emit_swap (void)
+{
+ EMIT_ASM32 (i386_swap,
+ "mov %eax,%ecx\n\t"
+ "mov %ebx,%edx\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ "push %edx\n\t"
+ "push %ecx");
+}
+
+static void
+i386_emit_stack_adjust (int n)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr = current_insn_ptr;
+
+ i = 0;
+ buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
+ buf[i++] = 0x64;
+ buf[i++] = 0x24;
+ buf[i++] = n * 8;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'. */
+
+static void
+i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ EMIT_ASM32 (i386_int_call_1_a,
+ /* Reserve a bit of stack space. */
+ "sub $0x8,%esp");
+ /* Put the one argument on the stack. */
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
+ buf[i++] = 0x04;
+ buf[i++] = 0x24;
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ i386_emit_call (fn);
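+ /* The 64-bit result comes back in %edx:%eax; move the high half
+ into %ebx to match the %ebx:%eax top-of-stack convention. */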
+ EMIT_ASM32 (i386_int_call_1_c,
+ "mov %edx,%ebx\n\t"
+ "lea 0x8(%esp),%esp");
+}
+
+/* FN's prototype is `void(*fn)(int,LONGEST)'. */
+
+static void
+i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+ unsigned char buf[16];
+ int i;
+ CORE_ADDR buildaddr;
+
+ EMIT_ASM32 (i386_void_call_2_a,
+ /* Preserve %eax only; we don't have to worry about %ebx. */
+ "push %eax\n\t"
+ /* Reserve a bit of stack space for arguments. */
+ "sub $0x10,%esp\n\t"
+ /* Copy "top" to the second argument position. (Note that
+ we can't assume the function won't scribble on its
+ arguments, so don't try to restore from this.) */
+ "mov %eax,4(%esp)\n\t"
+ "mov %ebx,8(%esp)");
+ /* Put the first argument on the stack. */
+ buildaddr = current_insn_ptr;
+ i = 0;
+ buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
+ buf[i++] = 0x04;
+ buf[i++] = 0x24;
+ memcpy (&buf[i], &arg1, sizeof (arg1));
+ i += 4;
+ append_insns (&buildaddr, i, buf);
+ current_insn_ptr = buildaddr;
+ i386_emit_call (fn);
+ EMIT_ASM32 (i386_void_call_2_b,
+ "lea 0x10(%esp),%esp\n\t"
+ /* Restore original stack top. */
+ "pop %eax");
+}
+
+
+void
+i386_emit_eq_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (eq,
+ /* Check low half first, more likely to be decider */
+ "cmpl %eax,(%esp)\n\t"
+ "jne .Leq_fallthru\n\t"
+ "cmpl %ebx,4(%esp)\n\t"
+ "jne .Leq_fallthru\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Leq_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 18;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_ne_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (ne,
+ /* Check low half first, more likely to be decider */
+ "cmpl %eax,(%esp)\n\t"
+ "jne .Lne_jump\n\t"
+ "cmpl %ebx,4(%esp)\n\t"
+ "je .Lne_fallthru\n\t"
+ ".Lne_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lne_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 18;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_lt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (lt,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jl .Llt_jump\n\t"
+ "jne .Llt_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jnl .Llt_fallthru\n\t"
+ ".Llt_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Llt_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_le_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (le,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jle .Lle_jump\n\t"
+ "jne .Lle_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jnle .Lle_fallthru\n\t"
+ ".Lle_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lle_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_gt_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (gt,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jg .Lgt_jump\n\t"
+ "jne .Lgt_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jng .Lgt_fallthru\n\t"
+ ".Lgt_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lgt_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+void
+i386_emit_ge_goto (int *offset_p, int *size_p)
+{
+ EMIT_ASM32 (ge,
+ "cmpl %ebx,4(%esp)\n\t"
+ "jge .Lge_jump\n\t"
+ "jne .Lge_fallthru\n\t"
+ "cmpl %eax,(%esp)\n\t"
+ "jnge .Lge_fallthru\n\t"
+ ".Lge_jump:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx\n\t"
+ /* jmp, but don't trust the assembler to choose the right jump */
+ ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
+ ".Lge_fallthru:\n\t"
+ "lea 0x8(%esp),%esp\n\t"
+ "pop %eax\n\t"
+ "pop %ebx");
+
+ if (offset_p)
+ *offset_p = 20;
+ if (size_p)
+ *size_p = 4;
+}
+
+struct emit_ops i386_emit_ops =
+ {
+ i386_emit_prologue,
+ i386_emit_epilogue,
+ i386_emit_add,
+ i386_emit_sub,
+ i386_emit_mul,
+ i386_emit_lsh,
+ i386_emit_rsh_signed,
+ i386_emit_rsh_unsigned,
+ i386_emit_ext,
+ i386_emit_log_not,
+ i386_emit_bit_and,
+ i386_emit_bit_or,
+ i386_emit_bit_xor,
+ i386_emit_bit_not,
+ i386_emit_equal,
+ i386_emit_less_signed,
+ i386_emit_less_unsigned,
+ i386_emit_ref,
+ i386_emit_if_goto,
+ i386_emit_goto,
+ i386_write_goto_address,
+ i386_emit_const,
+ i386_emit_call,
+ i386_emit_reg,
+ i386_emit_pop,
+ i386_emit_stack_flush,
+ i386_emit_zero_ext,
+ i386_emit_swap,
+ i386_emit_stack_adjust,
+ i386_emit_int_call_1,
+ i386_emit_void_call_2,
+ i386_emit_eq_goto,
+ i386_emit_ne_goto,
+ i386_emit_lt_goto,
+ i386_emit_le_goto,
+ i386_emit_gt_goto,
+ i386_emit_ge_goto
+ };
+
+
+static struct emit_ops *
+x86_emit_ops (void)
+{
+#ifdef __x86_64__
+ int use_64bit = register_size (0) == 8;
+
+ if (use_64bit)
+ return &amd64_emit_ops;
+ else
+#endif
+ return &i386_emit_ops;
+}
+