/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"
#include "agent.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
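/* 0xe9 is the "jmp rel32" opcode, giving the 5-byte jump used to
   reach a jump pad; with the 0x66 operand-size prefix it becomes
   "jmp rel16", the 4-byte form used when only a 2-byte displacement
   to a nearby trampoline fits.  */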

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

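    /* desc is a struct user_desc viewed as an array of unsigned ints;
       desc[1] is its base_addr field, i.e. the thread area address.  */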
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

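    /* The low three bits of a segment selector hold the RPL and table
       indicator; shifting them away leaves the GDT entry number that
       PTRACE_GET_THREAD_AREA expects.  */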
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
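/* 0xCC is the one-byte INT3 instruction, the standard x86 software
   breakpoint.  */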

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp;
         here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved in some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, i386_low_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

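/* Both the native 64-bit siginfo and the 32-bit compat layout above
   occupy the same overall 128 bytes (x86_siginfo_fixup checks this),
   but the field offsets differ because pointers and clock values are
   narrower on 32-bit, so the conversion below must copy field by
   field rather than memcpy.  */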
static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

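      /* The fxsave/xsave layout reserves bytes 464..511 of the legacy
         region for software use, and Linux publishes a copy of XCR0
         there, which is what the read below picks up.  */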
      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  int use_64bit = linux_pid_exe_is_elf_64_file (pid);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok, we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
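
/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and returns 3: strtoul
   consumes one hex group per iteration and the loop stops once no
   digits remain.  */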

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":"); \
    } while (0)
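
/* For example, EMIT_ASM (foo, "pop %rax") assembles "pop %rax"
   between the labels start_foo and end_foo inside gdbserver itself
   (the leading jmp keeps the block from ever executing here), and
   add_insns then copies the bytes between those labels into the
   inferior's compiled-code buffer.  */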

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
               "\t" "jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":\n" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":\n" \
               ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
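
/* In amd64_emit_if_goto above, the 32-bit displacement of the emitted
   jne starts 10 bytes in: 3 bytes for mov %rax,%rcx, 1 for pop %rax,
   4 for cmp $0,%rcx, and 2 for the 0x0f 0x85 jne opcode; hence
   *offset_p = 10 and *size_p = 4 for amd64_write_goto_address.  */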

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
         a register, so avoid it if possible.  Use r10, since it is
         call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

1965static void
1966amd64_emit_reg (int reg)
1967{
1968 unsigned char buf[16];
1969 int i;
1970 CORE_ADDR buildaddr;
1971
1972 /* Assume raw_regs is still in %rdi. */
1973 buildaddr = current_insn_ptr;
1974 i = 0;
1975 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1976 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1977 i += 4;
1978 append_insns (&buildaddr, i, buf);
1979 current_insn_ptr = buildaddr;
1980 amd64_emit_call (get_raw_reg_func_addr ());
1981}
1982
1983static void
1984amd64_emit_pop (void)
1985{
1986 EMIT_ASM (amd64_pop,
1987 "pop %rax");
1988}
1989
1990static void
1991amd64_emit_stack_flush (void)
1992{
1993 EMIT_ASM (amd64_stack_flush,
1994 "push %rax");
1995}
1996
1997static void
1998amd64_emit_zero_ext (int arg)
1999{
2000 switch (arg)
2001 {
2002 case 8:
2003 EMIT_ASM (amd64_zero_ext_8,
2004 "and $0xff,%rax");
2005 break;
2006 case 16:
2007 EMIT_ASM (amd64_zero_ext_16,
2008 "and $0xffff,%rax");
2009 break;
2010 case 32:
2011 EMIT_ASM (amd64_zero_ext_32,
2012 "mov $0xffffffff,%rcx\n\t"
2013 "and %rcx,%rax");
2014 break;
2015 default:
2016 emit_error = 1;
2017 }
2018}
2019
2020static void
2021amd64_emit_swap (void)
2022{
2023 EMIT_ASM (amd64_swap,
2024 "mov %rax,%rcx\n\t"
2025 "pop %rax\n\t"
2026 "push %rcx");
2027}
2028
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments where N * 8 fits in a signed byte,
     but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

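/* The amd64_emit_*_goto functions below compare the top two stack
   entries, pop them, and emit a 5-byte jmp whose 32-bit displacement
   starts *OFFSET_P bytes into the sequence and is *SIZE_P bytes wide.
   The displacement is patched by amd64_write_goto_address once the
   branch target is known.  */
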
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

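/* The emit_ops vector used when compiling agent expressions for
   64-bit (amd64) inferiors.  */
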
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

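/* The i386 counterparts follow.  Here a value-stack entry is 64 bits
   wide: the top of the stack lives in the %eax (low half) / %ebx
   (high half) register pair, and deeper entries occupy two 32-bit
   stack slots each.  */
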
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

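/* 64-bit multiply and shifts are not open-coded for i386.  Setting
   emit_error makes the caller abandon bytecode compilation for this
   expression.  */
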
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The low words must be compared unsigned.  */
	      "jb .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

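/* Emit code to load the 64-bit constant NUM into the %eax (low) /
   %ebx (high) pair.  */
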
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

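/* Emit a 32-bit relative call to the function at address FN.  */
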
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

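/* Emit code to fetch raw register number REG: push REG and the
   raw-register block (saved at 8(%ebp) by the prologue) as stack
   arguments and call the get_raw_reg helper.  */
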
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so we don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

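/* Like the amd64_emit_*_goto functions above, but for 64-bit values
   split across the %eax/%ebx pair and two 32-bit stack slots.  The
   high words decide the comparison unless they are equal, in which
   case the low words are compared unsigned.  */
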
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* Equal high words: compare the low words unsigned.  */
	      "jnb .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* Equal high words: compare the low words unsigned.  */
	      "jnbe .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* Equal high words: compare the low words unsigned.  */
	      "jna .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* Equal high words: compare the low words unsigned.  */
	      "jnae .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

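/* The emit_ops vector used when compiling agent expressions for
   32-bit (i386) inferiors.  */
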
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

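/* Return the emit_ops vector matching the inferior: amd64 when
   registers are 8 bytes wide, i386 otherwise.  */
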
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
};