/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

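/* Instruction templates used when installing fast tracepoint jumps:
   jump_insn is a 5-byte "jmp rel32" (opcode 0xe9 followed by a 32-bit
   displacement) and small_jump_insn is a "jmp rel16" (0xe9 with a
   0x66 operand-size prefix and a 16-bit displacement).  The zeroed
   displacement bytes are patched in when each jump is wired up.  */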
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

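/* In the map below, a -1 entry means the register does not live in
   the GPR area of `struct user' and is not transferred with the
   general-purpose regset.  These slots cover GDB's x87 and SSE
   register numbers (st0-st7, the FPU control registers, xmm0-xmm15
   and mxcsr), which travel in the FP/XSTATE regsets instead;
   x86_fill_gregset and x86_store_gregset skip them.  */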
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* DESC now holds a GDT entry in `struct user_desc' layout; its
       second word (desc[1]) is the segment's base address.  */
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    /* A segment selector's low 3 bits are the RPL and table-indicator
       flags; shifting them off leaves the GDT entry index.  */
    const int reg_thread_area = 3;
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update target_regsets accordingly, perhaps by
   moving target_regsets to linux_target_ops and setting the right one
   there, rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
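/* Note that an entry in this table can be disabled at runtime by
   setting its size to 0: x86_linux_update_xmltarget below uses that
   to turn off PTRACE_GETFPXREGS when the kernel lacks it, and fills
   in the real buffer size (I386_XSTATE_SIZE (xcr0)) for the
   PTRACE_GETREGSET entry once XSAVE support has been detected.  */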

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

/* 0xCC is the single-byte int3 software breakpoint instruction.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later, just before resuming the
	 lwp; here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved by other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':	/* Software breakpoint ("Z0" packet).  */
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2':	/* Write watchpoint.  */
    case '3':	/* Read watchpoint.  */
    case '4':	/* Access watchpoint.  */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded; otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */
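/* For example, in the native 64-bit siginfo_t the pointer-carrying
   fields (si_ptr, si_addr) are 8 bytes wide, while the 32-bit layout
   packs them into 4-byte compat_uptr_t slots, so the union members
   below sit at different offsets than their native counterparts and
   must be converted field by field.  */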

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid and si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from the XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else
	    init_registers_amd64_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}

/* Process the qSupported query, "xmlRegisters=".  Update the buffer
   size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
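/* For example, a GDB built with XML support announces
   "xmlRegisters=i386" (possibly as one item in a longer
   comma-separated list) in its qSupported packet; the loop above then
   sets use_xml before the target description is regenerated.  */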

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* OK, we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

/* Write into BUF the bytes encoded by the whitespace-separated string
   of hexadecimal byte values OP; return the number of bytes
   written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
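/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF and returns 3; callers
   advance their write index by the returned count.  */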

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */
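/* Roughly, the pad built below does the following: save all the GPRs,
   eflags and the tracepoint PC; reserve stack space for a collecting_t
   object and fill it in; spin on the lock at LOCKADDR; call the
   collector with the tracepoint and saved-register block as arguments;
   release the lock and restore the registers; execute the relocated
   copy of the original instruction; and finally jump back to
   TPADDR + ORIG_SIZE in the program.  */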

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* movabs <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad, so call it through a
     register.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* movabs $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax,lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove the stack space that had been used for the collecting_t
     object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves the original
     i386 behind (cmpxchg first appeared on the i486).  If we cared
     about that, this could use xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax,lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove the stack space that had been used for the collecting_t
     object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the
	 trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (in_process_agent_loaded ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to the user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
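
/* As an illustration, EMIT_ASM (pad_nop, "nop") expands to roughly:

     extern unsigned char start_pad_nop, end_pad_nop;
     add_insns (&start_pad_nop, &end_pad_nop - &start_pad_nop);
     __asm__ ("jmp end_pad_nop\n"
	      "\t" "start_pad_nop:" "\t" "nop" "\n"
	      "\t" "end_pad_nop:");

   i.e. the instructions are assembled into this function's own body
   (and jumped over at run time); the bytes between the two labels are
   then copied into the inferior at current_insn_ptr.  */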

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
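
/* The code emitted by these helpers keeps the top of the bytecode
   evaluation stack in %rax and spills outer entries to the machine
   stack.  The prologue above stashes the two incoming arguments (the
   raw-register block in %rdi and the result pointer in %rsi); the
   epilogue stores the final top of stack through that result pointer
   and returns 0.  */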

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
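
/* The goto emitters encode their branch as raw bytes with a zero
   displacement on purpose: *OFFSET_P reports where the 4-byte
   displacement field sits inside the emitted sequence (e.g. byte 10
   for amd64_emit_if_goto, byte 1 for amd64_emit_goto), and
   amd64_write_goto_address patches that field once the branch target
   is known.  */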

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

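/* For example, amd64_emit_int_call_1 (fn, 1) emits `movl $1,%edi'
   followed by a call to FN; per the SysV amd64 calling convention
   FN's LONGEST result lands in %rax, which is where the top of the
   value stack is kept, so the result becomes the new stack top.  */
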
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

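/* The conditional-goto emitters below compare the second stack entry
   (at (%rsp)) against the top (%rax), pop both, and emit a jump whose
   32-bit offset is patched later by amd64_write_goto_address;
   *OFFSET_P reports where in the emitted sequence that offset
   lives.  */
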
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

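/* The emit_ops vector used to compile bytecode for 64-bit
   inferiors.  */
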
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

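/* On i386 the 64-bit top-of-stack value is kept in the %eax (low
   half) / %ebx (high half) pair; the epilogue stores that pair
   through the value pointer and returns zero.  */
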
static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

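/* Sign-extend the low ARG bits of the top of stack: cbtw/cwtl widen
   the value within %eax, then the sign is copied across the high half
   by the arithmetic shift of %ebx.  */
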
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

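/* Load the 64-bit constant NUM into the %eax/%ebx pair.  When the
   high half is zero we emit the two-byte `xor %ebx,%ebx' instead of
   a five-byte immediate move.  */
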
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

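/* Fetch raw register REG: the 32-bit calling convention passes both
   arguments on the stack, so this stores the raw register block
   address (saved at 8(%ebp) by the prologue) and REG into the two
   reserved slots before calling the get_raw_reg helper; the 32-bit
   result is left in %eax with the high half %ebx cleared.  */
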
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

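/* Like their amd64 counterparts, the comparison emitters below report
   where the patchable 32-bit jump offset lives; the offsets are
   larger here (18 or 20 bytes in) because both halves of the
   %eax/%ebx pair must be compared.  */
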
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

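/* Return the emit_ops vector matching the inferior: on a biarch
   (x86-64) gdbserver, a 64-bit register 0 identifies a 64-bit
   inferior; otherwise the i386 emitters are used.  */
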
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
};