/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

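/* Templates for the jump instructions the fast-tracepoint code below
   installs: a 5-byte "jmp rel32" and a 4-byte "jmp rel16" (0x66 is
   the operand-size prefix).  The zeroed displacement bytes are
   patched in when a jump pad is wired up.  */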
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* The low 3 bits of a segment
                                      selector hold the RPL and TI
                                      bits; shifting them off leaves
                                      the GDT index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}
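
/* Note the direction convention: the "fill" routines collect register
   values out of the regcache into BUF (on their way to ptrace), while
   the "store" routines below supply values from BUF back into the
   regcache.  */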

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

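/* 0xCC is the one-byte int3 trap instruction, used as the software
   breakpoint on x86.  */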
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
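
/* The final sizeof (void *) initializer above is the width of a debug
   register in bytes: 4 in a 32-bit GDBserver, 8 in a 64-bit one.  */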

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (native, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (native, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}
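
/* An inferior that reaches the FIXUP_X32 case above has a 64-bit
   tdesc (the FIXUP_32 branch did not return) but an executable that
   is not ELF64, i.e. it is an x32 process, so an LP64 GDBserver must
   still convert the siginfo layout.  */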

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..471] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
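
/* With the layout above, the XCR0 mask is read out of an XSAVE buffer
   viewed as an array of 64-bit words at index
   I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t), i.e. 464 / 8 = 58,
   which is how x86_linux_read_description below fetches it.  */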

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_AVX_MPX_MASK:
                  return tdesc_amd64_avx_mpx_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MPX_MASK):
              return tdesc_i386_avx_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);
          char *p;

          for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno, int *sysret)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;
      long l_sysret;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      collect_register_by_name (regcache, "rax", &l_sysret);
      *sysno = (int) l_sysno;
      *sysret = (int) l_sysret;
    }
  else
    {
      collect_register_by_name (regcache, "orig_eax", sysno);
      collect_register_by_name (regcache, "eax", sysret);
    }
}
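
/* orig_rax/orig_eax is read above because by the time the stop is
   reported the kernel has already replaced rax/eax with the syscall's
   return value; the ptrace-visible orig_* slot preserves the original
   syscall number.  */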

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

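/* Append to BUF the bytes encoded by OP, a string of space-separated
   hex byte values such as "48 89 e6".  Returns the number of bytes
   written.  */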
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");          /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");                /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");             /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");       /* lock cmpxchg
                                                      %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");             /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");             /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");             /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");          /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");       /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");          /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");          /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");             /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");                /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");          /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
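
/* For example, EMIT_ASM (foo, "pop %rax") (with a hypothetical name
   "foo") assembles "pop %rax" between the local labels start_foo and
   end_foo inside the enclosing function's own text, jumps over those
   bytes at run time, and lets add_insns copy whatever lies between
   the labels into the inferior's jump pad.  */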

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
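
/* On x86-64 hosts, EMIT_ASM32 switches the assembler to .code32 so
   the emitted bytes are valid for a 32-bit inferior, then switches
   back to .code64; on 32-bit hosts it is simply EMIT_ASM.  */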

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1854
1855static void
1856amd64_emit_reg (int reg)
1857{
1858 unsigned char buf[16];
1859 int i;
1860 CORE_ADDR buildaddr;
1861
1862 /* Assume raw_regs is still in %rdi. */
1863 buildaddr = current_insn_ptr;
1864 i = 0;
1865 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1866 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1867 i += 4;
1868 append_insns (&buildaddr, i, buf);
1869 current_insn_ptr = buildaddr;
1870 amd64_emit_call (get_raw_reg_func_addr ());
1871}
1872
1873static void
1874amd64_emit_pop (void)
1875{
1876 EMIT_ASM (amd64_pop,
1877 "pop %rax");
1878}
1879
1880static void
1881amd64_emit_stack_flush (void)
1882{
1883 EMIT_ASM (amd64_stack_flush,
1884 "push %rax");
1885}
1886
1887static void
1888amd64_emit_zero_ext (int arg)
1889{
1890 switch (arg)
1891 {
1892 case 8:
1893 EMIT_ASM (amd64_zero_ext_8,
1894 "and $0xff,%rax");
1895 break;
1896 case 16:
1897 EMIT_ASM (amd64_zero_ext_16,
1898 "and $0xffff,%rax");
1899 break;
1900 case 32:
1901 EMIT_ASM (amd64_zero_ext_32,
1902 "mov $0xffffffff,%rcx\n\t"
1903 "and %rcx,%rax");
1904 break;
1905 default:
1906 emit_error = 1;
1907 }
1908}
1909
1910static void
1911amd64_emit_swap (void)
1912{
1913 EMIT_ASM (amd64_swap,
1914 "mov %rax,%rcx\n\t"
1915 "pop %rax\n\t"
1916 "push %rcx");
1917}
1918
1919static void
1920amd64_emit_stack_adjust (int n)
1921{
1922 unsigned char buf[16];
1923 int i;
1924 CORE_ADDR buildaddr = current_insn_ptr;
1925
1926 i = 0;
1927 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1928 buf[i++] = 0x8d;
1929 buf[i++] = 0x64;
1930 buf[i++] = 0x24;
1931 /* This only handles adjustments up to 16, but we don't expect any more. */
1932 buf[i++] = n * 8;
1933 append_insns (&buildaddr, i, buf);
1934 current_insn_ptr = buildaddr;
1935}
1936
1937/* FN's prototype is `LONGEST(*fn)(int)'. */
1938
1939static void
1940amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1941{
1942 unsigned char buf[16];
1943 int i;
1944 CORE_ADDR buildaddr;
1945
1946 buildaddr = current_insn_ptr;
1947 i = 0;
1948 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 1949 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
1950 i += 4;
1951 append_insns (&buildaddr, i, buf);
1952 current_insn_ptr = buildaddr;
1953 amd64_emit_call (fn);
1954}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf;	/* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
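/* The conditional-goto emitters below compare the two top stack
   entries, pop them, and either fall through or take a jump whose
   target is patched in later via amd64_write_goto_address.  *OFFSET_P
   is where the jump's 4-byte operand sits in the emitted sequence and
   *SIZE_P is its size.  The 13 is checkable by adding up the
   encoding: cmp (4) + jcc (2) + lea (5) + pop (1) + the 0xe9 opcode
   (1).  */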
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

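/* A note for readers (inferred from the emitters below, not an
   upstream comment): the i386 code keeps the 64-bit top-of-stack
   value split across a register pair, %eax holding the low 32 bits
   and %ebx the high 32 bits; deeper entries live on the machine
   stack.  */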
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

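/* The unsupported 64-bit operations below just flag emit_error; as
   far as I can tell, this makes gdbserver abandon bytecode
   compilation and fall back to interpreting the agent expression.  */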
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
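/* The 11 below is the distance to the jne's 4-byte operand:
   mov (2) + or (2) + pop (1) + pop (1) + cmpl $0,%ecx (3) + the
   0x0f 0x85 opcode (2).  */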
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8;	/* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb;	/* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb;	/* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8;	/* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8;	/* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d;	/* lea <n*8>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* As on amd64, this assumes N is small enough that n * 8 fits in
     the disp8.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;	/* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  /* The LONGEST result comes back in %edx:%eax per the i386 calling
     convention; move the high half into %ebx, the high word of our
     top-of-stack pair.  */
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;	/* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
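/* As on amd64, the emitters below implement the conditional gotos,
   here comparing both halves of the 64-bit pair.  The operand offsets
   are again checkable from the encodings: for eq/ne, cmpl (3) + jcc
   (2) + cmpl (4) + jcc (2) + lea (4) + pop (1) + pop (1) + the 0xe9
   opcode (1) gives 18; lt/le/gt/ge start with the four-byte cmpl and
   an extra jcc, giving 20.  */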
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
      || tdesc == tdesc_x32_linux)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_amd64_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_amd64_avx512_linux || tdesc == tdesc_x32_avx512_linux)
    return X86_TDESC_AVX512;
#endif

  if (tdesc == tdesc_i386_mmx_linux)
    return X86_TDESC_MMX;
  if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_i386_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_i386_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_i386_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_i386_avx512_linux)
    return X86_TDESC_AVX512;

  return 0;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();
  init_registers_amd64_avx_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();
  init_registers_i386_avx_mpx_linux ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}