gdb/testsuite/gdb.base/long_long.exp: Fix ARM EABI target
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
28e7fd62 3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e
L
28#include "i386-xstate.h"
29#include "elf/common.h"
d0722149
DE
30
31#include "gdb_proc_service.h"
58b4daa5 32#include "agent.h"
d0722149 33
90884b2b 34/* Defined in auto-generated file i386-linux.c. */
d0722149 35void init_registers_i386_linux (void);
90884b2b
L
36/* Defined in auto-generated file amd64-linux.c. */
37void init_registers_amd64_linux (void);
1570b33e
L
38/* Defined in auto-generated file i386-avx-linux.c. */
39void init_registers_i386_avx_linux (void);
40/* Defined in auto-generated file amd64-avx-linux.c. */
41void init_registers_amd64_avx_linux (void);
3a13a53b
L
42/* Defined in auto-generated file i386-mmx-linux.c. */
43void init_registers_i386_mmx_linux (void);
4d47af5c
L
44/* Defined in auto-generated file x32-linux.c. */
45void init_registers_x32_linux (void);
46/* Defined in auto-generated file x32-avx-linux.c. */
47void init_registers_x32_avx_linux (void);
1570b33e 48
/* Instruction templates used when installing fast tracepoint jumps:
   a 5-byte jmp rel32 and a 4-byte (0x66-prefixed) jmp rel16; the
   displacement bytes are patched in at insertion time.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
65
66#include <sys/reg.h>
67#include <sys/procfs.h>
68#include <sys/ptrace.h>
1570b33e
L
69#include <sys/uio.h>
70
71#ifndef PTRACE_GETREGSET
72#define PTRACE_GETREGSET 0x4204
73#endif
74
75#ifndef PTRACE_SETREGSET
76#define PTRACE_SETREGSET 0x4205
77#endif
78
d0722149
DE
79
80#ifndef PTRACE_GET_THREAD_AREA
81#define PTRACE_GET_THREAD_AREA 25
82#endif
83
84/* This definition comes from prctl.h, but some kernels may not have it. */
85#ifndef PTRACE_ARCH_PRCTL
86#define PTRACE_ARCH_PRCTL 30
87#endif
88
89/* The following definitions come from prctl.h, but may be absent
90 for certain configurations. */
91#ifndef ARCH_GET_FS
92#define ARCH_SET_GS 0x1001
93#define ARCH_SET_FS 0x1002
94#define ARCH_GET_FS 0x1003
95#define ARCH_GET_GS 0x1004
96#endif
97
aa5ca48f
DE
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Mirror of the debug registers (addresses, control, ref counts),
     shared by all threads of the process.  */
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
163\f
/* Called by libthread_db.  Fetch the thread-local storage base for
   thread LWPID into *BASE.  On 64-bit inferiors IDX selects the FS or
   GS segment; on 32-bit inferiors IDX is a GDT entry index.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  /* A 64-bit register size means a 64-bit inferior.  */
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* desc[1] is the segment base; the uintptr_t cast zero-extends it
       so the value is correct for a 64-bit gdbserver too.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
204
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    /* The GDT entry index is the %gs selector shifted right by 3.  */
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] of the GDT descriptor holds the segment base.  */
    *addr = desc[1];
    return 0;
  }
}
250
251
d0722149
DE
252\f
253static int
254i386_cannot_store_register (int regno)
255{
256 return regno >= I386_NUM_REGS;
257}
258
259static int
260i386_cannot_fetch_register (int regno)
261{
262 return regno >= I386_NUM_REGS;
263}
264
/* Fill BUF (a ptrace GETREGS-layout buffer) from the register cache
   REGCACHE, using the regmap appropriate for the inferior's word
   size.  */
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not covered by i386_regmap; copy it separately.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
286
/* Store the general registers from BUF (a ptrace GETREGS-layout
   buffer) into the register cache REGCACHE.  Inverse of
   x86_fill_gregset.  */
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not covered by i386_regmap; copy it separately.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
308
/* Fill BUF with the FP register state from REGCACHE: fxsave layout on
   64-bit, legacy fsave layout on 32-bit.  */
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
318
/* Store the FP register state in BUF into REGCACHE: fxsave layout on
   64-bit, legacy fsave layout on 32-bit.  */
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
328
329#ifndef __x86_64__
330
/* Fill BUF (fxsave layout) from REGCACHE; 32-bit only, used with
   PTRACE_SETFPXREGS.  */
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
336
/* Store BUF (fxsave layout) into REGCACHE; 32-bit only, used with
   PTRACE_GETFPXREGS.  */
static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
342
343#endif
344
1570b33e
L
/* Fill BUF (xsave layout) from REGCACHE, for PTRACE_SETREGSET with
   NT_X86_XSTATE.  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
350
/* Store BUF (xsave layout) into REGCACHE, for PTRACE_GETREGSET with
   NT_X86_XSTATE.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
356
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

/* Table of register sets transferred via ptrace, terminated by a
   sentinel entry with size -1.  Each entry pairs a get/set ptrace
   request with fill/store callbacks.  The xstate entry's size of 0 is
   filled in at runtime once the kernel's XSAVE area size is known.  */
struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
387
388static CORE_ADDR
442ea881 389x86_get_pc (struct regcache *regcache)
d0722149
DE
390{
391 int use_64bit = register_size (0) == 8;
392
393 if (use_64bit)
394 {
395 unsigned long pc;
442ea881 396 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
397 return (CORE_ADDR) pc;
398 }
399 else
400 {
401 unsigned int pc;
442ea881 402 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
403 return (CORE_ADDR) pc;
404 }
405}
406
407static void
442ea881 408x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149
DE
409{
410 int use_64bit = register_size (0) == 8;
411
412 if (use_64bit)
413 {
414 unsigned long newpc = pc;
442ea881 415 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
416 }
417 else
418 {
419 unsigned int newpc = pc;
442ea881 420 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
421 }
422}
423\f
424static const unsigned char x86_breakpoint[] = { 0xCC };
425#define x86_breakpoint_len 1
426
427static int
428x86_breakpoint_at (CORE_ADDR pc)
429{
430 unsigned char c;
431
fc7238bb 432 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
433 if (c == 0xCC)
434 return 1;
435
436 return 0;
437}
438\f
aa5ca48f
DE
/* Support for debug registers.  */

/* Read debug register REGNUM of thread PTID via PTRACE_PEEKUSER.
   Calls error() if the ptrace read fails.  */
static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  /* PTRACE_PEEKUSER returns the value in-band, so errno is the only
     way to detect failure; clear it first.  */
  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
457
/* Write VALUE to debug register REGNUM of thread PTID via
   PTRACE_POKEUSER.  Calls error() if the ptrace write fails.  */
static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
471
964e4306
PA
/* find_inferior callback: flag ENTRY's lwp as needing a debug
   register refresh if it belongs to the process whose pid PID_P
   points at.  Always returns 0 so iteration visits every lwp.  */
static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
494
aa5ca48f
DE
/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  /* The write itself is deferred: each thread is flagged here and the
     registers are written in x86_linux_prepare_to_resume.  */
  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 508
964e4306 509/* Return the inferior's debug register REGNUM. */
aa5ca48f 510
964e4306
PA
511CORE_ADDR
512i386_dr_low_get_addr (int regnum)
513{
514 struct lwp_info *lwp = get_thread_lwp (current_inferior);
515 ptid_t ptid = ptid_of (lwp);
516
517 /* DR6 and DR7 are retrieved with some other way. */
0a5b1e09 518 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
519
520 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
521}
522
/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  /* Deferred write: threads are flagged here and DR7 is written in
     x86_linux_prepare_to_resume.  */
  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 533
964e4306
PA
534/* Return the inferior's DR7 debug control register. */
535
536unsigned
537i386_dr_low_get_control (void)
538{
539 struct lwp_info *lwp = get_thread_lwp (current_inferior);
540 ptid_t ptid = ptid_of (lwp);
541
542 return x86_linux_dr_get (ptid, DR_CONTROL);
aa5ca48f
DE
543}
544
545/* Get the value of the DR6 debug status register from the inferior
546 and record it in STATE. */
547
964e4306
PA
548unsigned
549i386_dr_low_get_status (void)
aa5ca48f
DE
550{
551 struct lwp_info *lwp = get_thread_lwp (current_inferior);
552 ptid_t ptid = ptid_of (lwp);
553
964e4306 554 return x86_linux_dr_get (ptid, DR_STATUS);
aa5ca48f
DE
555}
556\f
90d74c30 557/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
558
/* Insert a break- or watchpoint of TYPE at ADDR covering LEN bytes.
   TYPE is the Z-packet type character: '0' software breakpoint,
   '1' hardware breakpoint, '2' write / '3' read / '4' access
   watchpoint.  Returns 0 on success, 1 if unsupported, negative on
   error.  */
static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	/* Memory may be cached or need thread coordination; bracket
	   the write with prepare/done calls.  */
	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}
588
/* Remove a break- or watchpoint of TYPE at ADDR covering LEN bytes.
   TYPE uses the same Z-packet type characters as x86_insert_point.
   Returns 0 on success, 1 if unsupported, negative on error.  */
static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}
617
618static int
619x86_stopped_by_watchpoint (void)
620{
621 struct process_info *proc = current_process ();
622 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
623}
624
625static CORE_ADDR
626x86_stopped_data_address (void)
627{
628 struct process_info *proc = current_process ();
629 CORE_ADDR addr;
630 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
631 &addr))
632 return addr;
633 return 0;
634}
635\f
636/* Called when a new process is created. */
637
638static struct arch_process_info *
639x86_linux_new_process (void)
640{
641 struct arch_process_info *info = xcalloc (1, sizeof (*info));
642
643 i386_low_init_dregs (&info->debug_reg_state);
644
645 return info;
646}
647
648/* Called when a new thread is detected. */
649
650static struct arch_lwp_info *
651x86_linux_new_thread (void)
652{
653 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
654
655 info->debug_registers_changed = 1;
656
657 return info;
658}
659
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      /* Only write address registers that are actually in use.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      /* DR7 (control) is written last, after the addresses it
	 enables are in place.  */
      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
697\f
d0722149
DE
698/* When GDBSERVER is built as a 64-bit application on linux, the
699 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
700 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
701 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
702 conversion in-place ourselves. */
703
704/* These types below (compat_*) define a siginfo type that is layout
705 compatible with the siginfo type exported by the 32-bit userspace
706 support. */
707
708#ifdef __x86_64__
709
/* 32-bit-sized scalar types used to lay out the compat siginfo.  */
typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

/* siginfo_t as a 32-bit inferior sees it; layout must match the
   kernel's 32-bit userspace ABI.  */
typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
786
c92b5177
L
/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

/* siginfo_t as an x32 inferior sees it: like the 32-bit layout but
   with 64-bit (4-byte-aligned) clock_t fields in _sigchld.  */
typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
847
d0722149
DE
/* Shorthand accessors for the union members of the compat siginfo
   structs above; these mirror the kernel's si_* convenience names.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
868
/* Convert the 64-bit host siginfo FROM into the 32-bit compat layout
   TO.  Which union members are live is decided by si_code first, then
   si_signo, mirroring the kernel's compat conversion.  */
static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: queued (rt) signal carrying pid/uid/value.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
924
/* Convert the 32-bit compat siginfo FROM into the 64-bit host layout
   TO.  Inverse of compat_siginfo_from_siginfo.  */
static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: queued (rt) signal carrying pid/uid/value.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
980
c92b5177
L
/* Convert the 64-bit host siginfo FROM into the x32 compat layout TO.
   Same dispatch as compat_siginfo_from_siginfo; the cpt_si_* macros
   name identically-placed members in the x32 struct.  */
static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1037
/* Convert the x32 compat siginfo FROM into the 64-bit host layout TO.
   Inverse of compat_x32_siginfo_from_siginfo.  */
static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1094
1095/* Is this process 64-bit? */
1096static int linux_is_elf64;
d0722149
DE
1097#endif /* __x86_64__ */
1098
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      /* The conversion relies on both layouts being 128 bytes.  */
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!linux_is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
1141\f
1570b33e
L
/* Nonzero if the connected GDB announced (via qSupported
   "xmlRegisters=i386") that it understands x86 XML target
   descriptions.  */
static int use_xml;

/* Update gdbserver_xmltarget.  */

/* Select the register set / target description matching the current
   inferior, probing the kernel's ptrace capabilities as needed.  The
   probe results (HAVE_PTRACE_GETREGSET, XCR0, HAVE_PTRACE_GETFPXREGS)
   are cached in function-local statics, so the kernel is probed only
   once per gdbserver run.  Also adjusts the sizes of the entries in
   target_regsets to enable/disable the corresponding requests.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  /* Nothing to do until we have an inferior to probe.  */
  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  /* 8 xmm registers means a 32-bit inferior; otherwise pick amd64 or
     x32 depending on the inferior's ELF class.  */
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else if (linux_is_elf64)
    init_registers_amd64_linux ();
  else
    init_registers_x32_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    /* Probe once whether the kernel supports PTRACE_GETFPXREGS
       (SSE-era register dump); fall back to plain x87 if not.  */
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      /* Without XML we cannot describe AVX state to GDB, so cap the
	 advertised state at SSE.  */
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      /* NOTE(review): 464 is where the Linux kernel stashes XCR0 in
	 the software-reserved area of the FXSAVE region — confirm
	 against the kernel's xstate_fx_sw_bytes layout.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else if (linux_is_elf64)
	    init_registers_amd64_avx_linux ();
	  else
	    init_registers_x32_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}
1277
1278/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1279 PTRACE_GETREGSET. */
1280
1281static void
1282x86_linux_process_qsupported (const char *query)
1283{
1284 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1285 with "i386" in qSupported query, it supports x86 XML target
1286 descriptions. */
1287 use_xml = 0;
1288 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1289 {
1290 char *copy = xstrdup (query + 13);
1291 char *p;
1292
1293 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1294 {
1295 if (strcmp (p, "i386") == 0)
1296 {
1297 use_xml = 1;
1298 break;
1299 }
1300 }
1301
1302 free (copy);
1303 }
1304
1305 x86_linux_update_xmltarget ();
1306}
1307
/* Initialize gdbserver for the architecture of the inferior.  */

/* Inspect /proc/<pid>/exe to decide whether the inferior is 32-bit,
   64-bit, or x32, reject impossible combinations, and point
   the_low_target at the matching regmap before refreshing the target
   description.  */
static void
x86_arch_setup (void)
{
  int pid = pid_of (get_thread_lwp (current_inferior));
  unsigned int machine;
  /* > 0: ELF64 executable; 0: ELF32; < 0: could not read the file.  */
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (sizeof (void *) == 4)
    {
      /* A 32-bit gdbserver cannot debug 64-bit (or x86-64) programs.  */
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#ifdef __x86_64__
  if (is_elf64 < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (machine == EM_X86_64)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      /* Distinguishes amd64 (ELF64) from x32 (ELF32 on a 64-bit
	 machine).  */
      linux_is_elf64 = is_elf64;
      x86_linux_update_xmltarget ();
      return;
    }

  linux_is_elf64 = 0;
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
1365
219f2f23
PA
/* Target hook: the x86/x86-64 backend supports tracepoints
   (including the fast tracepoint machinery below).  Always true.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1371
fa593d66
PA
1372static void
1373append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1374{
1375 write_inferior_memory (*to, buf, len);
1376 *to += len;
1377}
1378
/* Parse OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), and append the bytes to BUF.  Parsing stops
   at the first token that is not a hex number (or at the end of the
   string).  Returns the number of bytes written.  The caller must
   ensure BUF has room for every byte encoded in OP.

   Improvement over the original: OP is now const-qualified — every
   caller passes a string literal, the function never writes through
   OP, and strtoul accepts a const pointer, so this is backward
   compatible.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No digits consumed: end of the opcode string.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1398
1399#ifdef __x86_64__
1400
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the address of the tracepoint object, TPADDR the address
   the tracepoint is set at, COLLECTOR the gdb_collect function in the
   in-process agent, LOCKADDR the collecting_t spin-lock word, and
   ORIG_SIZE the length of the instruction being displaced.  On
   return, *JUMP_ENTRY is advanced past the pad, and
   *ADJUSTED_INSN_ADDR/*ADJUSTED_INSN_ADDR_END bracket the relocated
   copy of the original instruction.  Returns 0 on success, 1 on
   failure with an error message in ERR.  TRAMPOLINE/TRAMPOLINE_SIZE
   are unused on amd64 (5-byte jumps always fit).  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The PC at the tracepoint site is pushed last so the collector can
     find it.  */
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  /* Loop on lock cmpxchg until this thread owns the collecting_t
     lock (old value 0 -> our %rsp).  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  /* Mirror image of the save sequence above; the leading add skips
     the pushed PC.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1587
1588#endif /* __x86_64__ */
1589
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   i386 flavor of the jump pad builder; see
   amd64_install_fast_tracepoint_jump_pad for the parameter contract.
   On i386 a displaced 4-byte instruction only leaves room for a
   4-byte (16-bit-offset) jump at the tracepoint, in which case a
   trampoline is allocated to reach the pad.  Returns 0 on success, 1
   on failure with a message in ERR.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  /* Record the thread area (from %gs) as the lock owner id.  */
  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Relative call to the collector; on i386 it is always within
     32-bit reach.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state: mirror image of the saves above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1777
/* Target hook: build the fast tracepoint jump pad.  Dispatches to the
   amd64 builder when the inferior uses 8-byte registers, otherwise to
   the i386 builder; all arguments are forwarded unchanged.  See
   amd64/i386_install_fast_tracepoint_jump_pad for their meaning.  */
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1815
1816/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1817 architectures. */
1818
1819static int
1820x86_get_min_fast_tracepoint_insn_len (void)
1821{
1822 static int warned_about_fast_tracepoints = 0;
1823
1824#ifdef __x86_64__
1825 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1826 used for fast tracepoints. */
1827 if (register_size (0) == 8)
1828 return 5;
1829#endif
1830
58b4daa5 1831 if (agent_loaded_p ())
405f8e94
SS
1832 {
1833 char errbuf[IPA_BUFSIZ];
1834
1835 errbuf[0] = '\0';
1836
1837 /* On x86, if trampolines are available, then 4-byte jump instructions
1838 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1839 with a 4-byte offset are used instead. */
1840 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1841 return 4;
1842 else
1843 {
1844 /* GDB has no channel to explain to user why a shorter fast
1845 tracepoint is not possible, but at least make GDBserver
1846 mention that something has gone awry. */
1847 if (!warned_about_fast_tracepoints)
1848 {
1849 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1850 warned_about_fast_tracepoints = 1;
1851 }
1852 return 5;
1853 }
1854 }
1855 else
1856 {
1857 /* Indicate that the minimum length is currently unknown since the IPA
1858 has not loaded yet. */
1859 return 0;
1860 }
fa593d66
PA
1861}
1862
6a271cae
PA
1863static void
1864add_insns (unsigned char *start, int len)
1865{
1866 CORE_ADDR buildaddr = current_insn_ptr;
1867
1868 if (debug_threads)
1869 fprintf (stderr, "Adding %d bytes of insn at %s\n",
1870 len, paddress (buildaddr));
1871
1872 append_insns (&buildaddr, len, start);
1873 current_insn_ptr = buildaddr;
1874}
1875
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Emit the instructions INSNS into the compiled bytecode.  The asm
   block defines start_NAME/end_NAME labels around INSNS and jumps
   over it so it is never executed in gdbserver itself; the extern
   declarations and the add_insns call both reference the labels,
   which keeps the compiler from discarding the block and copies its
   bytes into the inferior.  NAME must be unique per call site.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble INSNS as 32-bit code (for i386
   inferiors) inside a 64-bit gdbserver, switching the assembler mode
   with .code32/.code64 around the block.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* A 32-bit gdbserver already assembles in 32-bit mode.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1913
1914#ifdef __x86_64__
1915
/* Emit the prologue of a compiled agent expression: set up a frame,
   reserve scratch space, and spill the two incoming arguments (%rdi
   and %rsi per the SysV AMD64 ABI) at -8/-16(%rbp) for later use.
   NOTE(review): judging by amd64_emit_epilogue, -16(%rbp) holds a
   pointer through which the expression's result is stored — confirm
   against the bytecode compiler's calling contract.  */
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


/* Emit the epilogue: store the result (%rax) through the pointer
   saved at -16(%rbp), return 0, and tear down the frame.  */
static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the next stack entry and add it into %rax (the value
   stack's top lives in %rax; deeper entries are on the real stack).  */
static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract %rax (top) from the next entry; result ends up in
   %rax and the stack shrinks by one.  */
static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiplication is not implemented; flag an emit error so the
   expression falls back to interpretation.  */
static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift: not implemented (see amd64_emit_mul).  */
static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift: not implemented.  */
static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift: not implemented.  */
static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit code to sign-extend %rax from its low ARG bits (8, 16 or 32)
   to the full 64 bits, chaining cbtw/cwtl/cltq as needed.  */
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
2003
/* Emit logical NOT: %rax = (%rax == 0) ? 1 : 0.  */
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit bitwise AND of the top two stack entries; result in %rax.  */
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise OR of the top two stack entries; result in %rax.  */
static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise XOR of the top two stack entries; result in %rax.  */
static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise NOT of %rax (xor with all-ones).  */
static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit equality test of the top two entries: both are consumed and
   %rax becomes 1 if equal, 0 otherwise.  */
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit signed "next < top" comparison; result (0/1) in %rax.  */
static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit unsigned "next < top" comparison; result (0/1) in %rax.  */
static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a memory load: dereference the address in %rax, reading SIZE
   bytes (1, 2, 4 or 8) into the low part of %rax.  Other sizes emit
   nothing.  */
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2109
/* Emit a conditional branch: pop the value stack's top and branch if
   it was nonzero.  The jne's 32-bit displacement is left zero; the
   caller patches it later via amd64_write_goto_address.  *OFFSET_P is
   the byte offset of the displacement within the emitted sequence
   (3 mov + 1 pop + 4 cmp + 2 opcode bytes = 10), *SIZE_P its width.  */
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp with a zero 32-bit displacement, to be
   patched later; the displacement starts at byte 1.  */
static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the SIZE-byte jump displacement at inferior address FROM so
   the branch lands at TO (rel32 is relative to the end of the
   displacement field).  Only 4-byte displacements are supported.  */
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2150
/* Emit code loading the 64-bit constant NUM into %rax
   (movabs: 0x48 0xb8 followed by the 8 immediate bytes).  */
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2165
2166static void
2167amd64_emit_call (CORE_ADDR fn)
2168{
2169 unsigned char buf[16];
2170 int i;
2171 CORE_ADDR buildaddr;
4e29fb54 2172 LONGEST offset64;
6a271cae
PA
2173
2174 /* The destination function being in the shared library, may be
2175 >31-bits away off the compiled code pad. */
2176
2177 buildaddr = current_insn_ptr;
2178
2179 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2180
2181 i = 0;
2182
2183 if (offset64 > INT_MAX || offset64 < INT_MIN)
2184 {
2185 /* Offset is too large for a call. Use callq, but that requires
2186 a register, so avoid it if possible. Use r10, since it is
2187 call-clobbered, we don't have to push/pop it. */
2188 buf[i++] = 0x48; /* mov $fn,%r10 */
2189 buf[i++] = 0xba;
2190 memcpy (buf + i, &fn, 8);
2191 i += 8;
2192 buf[i++] = 0xff; /* callq *%r10 */
2193 buf[i++] = 0xd2;
2194 }
2195 else
2196 {
2197 int offset32 = offset64; /* we know we can't overflow here. */
2198 memcpy (buf + i, &offset32, 4);
2199 i += 4;
2200 }
2201
2202 append_insns (&buildaddr, i, buf);
2203 current_insn_ptr = buildaddr;
2204}
2205
/* Emit code to fetch raw register number REG: load REG into %esi
   (second argument) and call the get_raw_reg helper in the IPA.
   Assumes the raw register block pointer is still in %rdi (first
   argument), as set up by amd64_emit_prologue.  */
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2223
/* Emit: discard the top of the value stack by reloading %rax from
   the real stack.  */
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: spill the cached top (%rax) onto the real stack, making room
   for a new top value.  */
static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit code to zero-extend %rax from its low ARG bits (8, 16 or 32).  */
static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* The 32-bit mask doesn't fit a sign-extended imm32, so go
	 through %rcx.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap the top two entries of the value stack.  */
static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code dropping N entries (N*8 bytes) from the real stack via
   lea with an 8-bit displacement.  */
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2287
2288/* FN's prototype is `LONGEST(*fn)(int)'. */
2289
2290static void
2291amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2292{
2293 unsigned char buf[16];
2294 int i;
2295 CORE_ADDR buildaddr;
2296
2297 buildaddr = current_insn_ptr;
2298 i = 0;
2299 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2300 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2301 i += 4;
2302 append_insns (&buildaddr, i, buf);
2303 current_insn_ptr = buildaddr;
2304 amd64_emit_call (fn);
2305}
2306
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Load ARG1 into %edi as FN's first argument; the cached top of
     stack (%rax) is passed in %rsi as the second.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}
2333
6b9801d4
SS
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  /* Conditional branch: taken when the runner-up stack entry at
     (%rsp) equals the cached top (%rax).  Both operands are discarded
     on either path.  The jmp is emitted as raw bytes so its 32-bit
     displacement can be patched afterwards; *OFFSET_P is the byte
     offset of that displacement within the sequence and *SIZE_P its
     width.  */
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  /* 13 = cmp (4) + jne (2) + lea (5) + pop (1) + jmp opcode (1).  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2353
/* The following comparison emitters all follow the same shape as
   amd64_emit_eq_goto: compare the runner-up stack entry at (%rsp)
   against the cached top (%rax), discard both, and emit a raw 5-byte
   jmp whose 32-bit displacement (at byte offset *OFFSET_P, width
   *SIZE_P) is patched later.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  /* Taken when (%rsp) != %rax.  */
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  /* Taken when (%rsp) < %rax, signed.  */
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  /* Taken when (%rsp) <= %rax, signed.  */
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  /* Taken when (%rsp) > %rax, signed.  */
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2433
2434void
2435amd64_emit_ge_goto (int *offset_p, int *size_p)
2436{
2437 EMIT_ASM (amd64_ge,
2438 "cmp %rax,(%rsp)\n\t"
2439 "jnge .Lamd64_ge_fallthru\n\t"
2440 ".Lamd64_ge_jump:\n\t"
2441 "lea 0x8(%rsp),%rsp\n\t"
2442 "pop %rax\n\t"
2443 /* jmp, but don't trust the assembler to choose the right jump */
2444 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2445 ".Lamd64_ge_fallthru:\n\t"
2446 "lea 0x8(%rsp),%rsp\n\t"
2447 "pop %rax");
2448
2449 if (offset_p)
2450 *offset_p = 13;
2451 if (size_p)
2452 *size_p = 4;
2453}
2454
6a271cae
PA
/* Code-generation callbacks used when compiling agent expressions to
   native amd64 code.  NOTE(review): the initializer order must match
   the declaration of struct emit_ops -- confirm against its header.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2495
2496#endif /* __x86_64__ */
2497
static void
i386_emit_prologue (void)
{
  /* Standard frame setup; %ebx is callee-saved and used as the high
     half of the cached 64-bit top of stack, so preserve it.  */
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  /* Store the 64-bit result (%eax low, %ebx high) through the value
     pointer at 12(%ebp), return 0, and undo the prologue.  */
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}
2521
static void
i386_emit_add (void)
{
  /* 64-bit add: add the runner-up entry at (%esp)/4(%esp) into the
     cached top (%eax low, %ebx high), then drop the memory entry.  */
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  /* 64-bit subtract: runner-up minus top; the result is computed in
     place on the memory stack and then popped into %eax:%ebx.  */
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}
2540
/* 64-bit multiply and shifts are not open-coded on i386; flag a
   compile error so the expression is handled some other way
   (presumably falling back to interpretation -- confirm with the
   bytecode compiler's caller).  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2564
static void
i386_emit_ext (int arg)
{
  /* Sign-extend the low ARG bits of the cached top of stack to the
     full 64 bits (%eax low, %ebx high).  ARG may be 8, 16 or 32;
     anything else is a compile error.  */
  switch (arg)
    {
    case 8:
      /* cbtw/cwtl widen %al -> %ax -> %eax; the sar fills %ebx with
         copies of the sign bit.  */
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2592
static void
i386_emit_log_not (void)
{
  /* Logical NOT of the cached 64-bit top: result is 1 if both halves
     are zero, else 0; high half (%ebx) is cleared.  */
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  /* 64-bit AND of the runner-up entry into the cached top, then drop
     the memory entry.  */
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  /* 64-bit OR, same stack discipline as i386_emit_bit_and.  */
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  /* 64-bit XOR, same stack discipline as i386_emit_bit_and.  */
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  /* Bitwise complement of the cached 64-bit top, in place.  */
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}
2638
static void
i386_emit_equal (void)
{
  /* 64-bit equality test between the runner-up entry (low at (%esp),
     high at 4(%esp)) and the cached top (%eax:%ebx).  Both operands
     are consumed; the boolean result (0/1) becomes the new top.  */
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  /* Signed 64-bit "runner-up < top": compare high halves first, fall
     back to the low halves only on equality.  Result is 0/1.  */
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  /* Unsigned variant of i386_emit_less_signed ("jb" instead of
     "jl").  Note the low-half comparison is unsigned in both.  */
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
2694
2695static void
2696i386_emit_ref (int size)
2697{
2698 switch (size)
2699 {
2700 case 1:
2701 EMIT_ASM32 (i386_ref1,
2702 "movb (%eax),%al");
2703 break;
2704 case 2:
2705 EMIT_ASM32 (i386_ref2,
2706 "movw (%eax),%ax");
2707 break;
2708 case 4:
2709 EMIT_ASM32 (i386_ref4,
2710 "movl (%eax),%eax");
2711 break;
2712 case 8:
2713 EMIT_ASM32 (i386_ref8,
2714 "movl 4(%eax),%ebx\n\t"
2715 "movl (%eax),%eax");
2716 break;
2717 }
2718}
2719
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  /* Conditional branch: taken when the cached 64-bit top (%eax:%ebx)
     is non-zero.  The top is popped either way.  The jcc is emitted
     as raw bytes (0x0f 0x85 = jne rel32) so its displacement can be
     patched later; *OFFSET_P locates the displacement, *SIZE_P gives
     its width.  */
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  /* 11 = mov (2) + or (2) + pop (1) + pop (1) + cmp (3) + 0f 85 (2).  */
  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  /* Unconditional branch; the rel32 displacement right after the 0xe9
     opcode is patched later via the write_goto_address callback.  */
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2749
2750static void
2751i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2752{
2753 int diff = (to - (from + size));
2754 unsigned char buf[sizeof (int)];
2755
2756 /* We're only doing 4-byte sizes at the moment. */
2757 if (size != 4)
2758 {
2759 emit_error = 1;
2760 return;
2761 }
2762
2763 memcpy (buf, &diff, sizeof (int));
2764 write_inferior_memory (from, buf, sizeof (int));
2765}
2766
static void
i386_emit_const (LONGEST num)
{
  /* Load the 64-bit literal NUM into the cached top of stack:
     low word into %eax, high word into %ebx.  When the high word is
     zero a shorter "xor %ebx,%ebx" is emitted instead of a second
     mov-immediate.  */
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2793
2794static void
2795i386_emit_call (CORE_ADDR fn)
2796{
2797 unsigned char buf[16];
2798 int i, offset;
2799 CORE_ADDR buildaddr;
2800
2801 buildaddr = current_insn_ptr;
2802 i = 0;
2803 buf[i++] = 0xe8; /* call <reladdr> */
2804 offset = ((int) fn) - (buildaddr + 5);
2805 memcpy (buf + 1, &offset, 4);
2806 append_insns (&buildaddr, 5, buf);
2807 current_insn_ptr = buildaddr;
2808}
2809
static void
i386_emit_reg (int reg)
{
  /* Push the value of raw register REG as the new top of stack, by
     calling the collector returned by get_raw_reg_func_addr () with
     (raw_regs, REG) as cdecl stack arguments.  raw_regs is the base
     address saved at 8(%ebp) by the prologue.  */
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Reserve stack space for the two outgoing arguments.  */
  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  /* Clear the high half of the result and pop the argument space.  */
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
2835
static void
i386_emit_pop (void)
{
  /* Reload the cached top of stack (%eax low, %ebx high) from the
     in-memory stack, discarding the old cached top.  */
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  /* Spill the cached 64-bit top onto the in-memory stack (high half
     pushed first so the low word ends up at the lower address).  */
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  /* Zero-extend the low ARG bits of the cached top; ARG may be 8, 16
     or 32.  The high half (%ebx) is cleared in every valid case.  */
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  /* Exchange the cached 64-bit top (%eax:%ebx) with the runner-up
     entry on the in-memory stack.  */
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}
2887
2888static void
2889i386_emit_stack_adjust (int n)
2890{
2891 unsigned char buf[16];
2892 int i;
2893 CORE_ADDR buildaddr = current_insn_ptr;
2894
2895 i = 0;
2896 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2897 buf[i++] = 0x64;
2898 buf[i++] = 0x24;
2899 buf[i++] = n * 8;
2900 append_insns (&buildaddr, i, buf);
2901 current_insn_ptr = buildaddr;
2902}
2903
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  /* FN returns a LONGEST in %eax:%edx; move the high half into %ebx
     to match the cached top-of-stack convention, then pop the
     argument space.  */
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
2931
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}
2967
6b9801d4
SS
2968
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  /* Conditional branch: taken when the runner-up 64-bit entry (low at
     (%esp), high at 4(%esp)) equals the cached top (%eax:%ebx).  Both
     operands are discarded on either path.  The jmp is emitted as raw
     bytes so its 32-bit displacement can be patched later; *OFFSET_P
     is the byte offset of that displacement, *SIZE_P its width.  */
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  /* 18 = cmp (3) + jne (2) + cmp (4) + jne (2) + lea (4) + pop (1)
     + pop (1) + jmp opcode (1).  */
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2993
/* The remaining comparison emitters follow the same shape as
   i386_emit_eq_goto: compare the runner-up 64-bit entry against the
   cached top (%eax:%ebx), discard both, and emit a raw 5-byte jmp
   whose 32-bit displacement (at byte offset *OFFSET_P, width *SIZE_P)
   is patched later.  The ordered comparisons test the high halves
   first and only consult the low halves on equality.  */

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  /* Taken when the values differ in either half.  */
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  /* Taken when runner-up < top, signed 64-bit.  */
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  /* 20 = cmp (4) + jcc (2) + jne (2) + cmp (3) + jcc (2) + lea (4)
     + pop (1) + pop (1) + jmp opcode (1).  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  /* Taken when runner-up <= top, signed 64-bit.  */
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  /* Taken when runner-up > top, signed 64-bit.  */
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  /* Taken when runner-up >= top, signed 64-bit.  */
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3123
6a271cae
PA
/* Code-generation callbacks used when compiling agent expressions to
   native i386 code.  NOTE(review): the initializer order must match
   the declaration of struct emit_ops -- confirm against its header.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3164
3165
/* Pick the emit_ops vector matching the inferior's word size: amd64
   when register 0 is 8 bytes wide, i386 otherwise (and always i386 in
   a 32-bit-only build).  */

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3178
c2d6af84
PA
/* Range stepping (vCont;r) is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3184
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.
   NOTE(review): field meanings follow the declaration order of
   struct linux_target_ops in linux-low.h -- confirm there before
   reordering or inserting entries.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  NULL,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
This page took 0.551313 seconds and 4 git commands to generate.