/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
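
/* Note on the encodings (for reference): jump_insn is a 5-byte "jmp
   rel32" (opcode 0xe9 followed by a 32-bit displacement), while
   small_jump_insn is a 4-byte "jmp rel16" (0xe9 with a 0x66
   operand-size prefix and a 16-bit displacement).  The jump pad
   builders below patch the zeroed displacement bytes in place.  */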

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                       /* MPX registers BND0 ... BND3.  */
  -1, -1,                               /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};
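
/* A -1 entry above means the register has no fixed offset in the
   ptrace `struct user' (GETREGS) layout; such registers are
   transferred through other regsets instead (the FP and XSAVE
   regsets defined further below).  */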

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
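
/* A note on the 32-bit path above: the low 3 bits of a segment
   selector are the RPL and table-indicator flags, so shifting %gs
   right by reg_thread_area (3) yields the GDT entry number expected
   by PTRACE_GET_THREAD_AREA.  The kernel then fills DESC with a
   `struct user_desc', whose second word (desc[1]) is the segment's
   base address.  */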


static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

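/* The x86 software breakpoint is the single-byte "int3" instruction,
   0xCC; that is the byte installed below and the byte
   x86_breakpoint_at checks for.  */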
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}
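
/* To summarize the cases above (no new behavior): a 32-bit inferior
   (32-bit tdesc) gets the full 32<->64 siginfo conversion (FIXUP_32);
   an x32 inferior (ELF32 executable under a 64-bit gdbserver, but
   with a 64-bit tdesc) only needs the smaller FIXUP_X32 adjustments;
   a plain 64-bit inferior needs no conversion at all.  */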

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..471] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
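
/* Concretely, as used below: the XCR0 mask lives at byte offset 464
   of the raw XSAVE buffer, i.e. at 64-bit word index
   I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t) == 58.  */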

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_AVX_MPX_MASK:
                  return tdesc_amd64_avx_mpx_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MPX_MASK):
              return tdesc_i386_avx_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);
          char *p;

          for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
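
/* For example (illustrative only): push_opcode (buf, "48 89 e6")
   parses the whitespace-separated hex bytes, stores 0x48 0x89 0xe6
   ("mov %rsp,%rsi") into BUF, and returns 3.  */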

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
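
/* Roughly how EMIT_ASM works: the inline asm assembles INSNS into
   this very function's body, bracketed by the start_NAME/end_NAME
   labels (the leading "jmp end_NAME" keeps the block from executing
   in place), and add_insns then copies the bytes between the two
   labels into the inferior at current_insn_ptr.  For example,
   EMIT_ASM (amd64_pop, "pop %rax") copies the single byte 0x58.  */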

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call.  Do an indirect callq
	 through a call-clobbered register instead, so we don't have
	 to push/pop it.  (0x48 0xba / 0xff 0xd2 encode the
	 movabs/callq pair via %rdx, which is dead at this point.)  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
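
/* A worked example for the direct-call branch above: the 0xe8 rel32
   displacement is counted from the end of the 5-byte call
   instruction, hence the "buildaddr + 1 + 4" in the offset
   computation; targets further than +/-2 GiB away fall back to the
   movabs + indirect call sequence.  */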

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

1980
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

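/* In amd64_emit_eq_goto and the conditional gotos that follow, the
   taken path is cmp (4 bytes) + jcc rel8 (2) + lea (5) + pop (1),
   i.e. 12 bytes before the e9 opcode, so the 4-byte displacement to
   patch starts 13 bytes into the sequence -- which is what
   *OFFSET_P and *SIZE_P report back to the caller.  */
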
void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

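/* Throughout the i386 emitters, a 64-bit value on top of the
   expression stack lives split across two registers: the low 32 bits
   in %eax and the high 32 bits in %ebx.  See the add/adc pair in
   i386_emit_add, and the store through the value pointer in the
   epilogue below.  */
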
static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

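/* Byte accounting for i386_emit_if_goto: mov (2) + or (2) + two pops
   (1 each) + cmpl $0,%ecx (3) is 9 bytes, and the 0f 85 jne opcode
   takes 2 more, so the patchable 4-byte displacement begins at
   offset 11, matching *OFFSET_P above.  */
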
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

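/* Worked example: if the displacement field of a goto sits at
   FROM = 0x1000 with SIZE = 4 and the target is TO = 0x1080, the
   value written is 0x1080 - (0x1000 + 4) = 0x7c.  The CPU adds the
   displacement to the address of the *next* instruction, and
   FROM + SIZE is exactly where the jump instruction ends.  */
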
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

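/* For example, i386_emit_const (0x100000002) emits

     b8 02 00 00 00    mov $0x2,%eax
     bb 01 00 00 00    mov $0x1,%ebx

   while a value that fits in 32 bits gets the shorter

     31 db             xor %ebx,%ebx

   for its high half.  */
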
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

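/* i386_emit_call emits a plain 5-byte relative call.  With the pad
   at (hypothetical) 0x1000 and FN at 0x3000, the bytes are
   e8 fb 1f 00 00, since 0x3000 - (0x1000 + 5) = 0x1ffb.  */
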
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

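/* The "mov %edx,%ebx" above is the 32-bit counterpart of simply
   leaving the result in %rax: per the i386 ABI a 64-bit (LONGEST)
   return value comes back in the %edx:%eax pair, and this JIT keeps
   the high half of its top-of-stack in %ebx.  */
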
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

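/* As with the amd64 versions, *OFFSET_P locates the patchable jump
   displacement.  For eq/ne the bytes before the e9 opcode add up to
   17 (3 + 2 + 4 + 2 + 4 + 1 + 1), giving offset 18; the lt/le/gt/ge
   variants start with the 4-byte "cmpl %ebx,4(%esp)" and total 19,
   giving offset 20.  */
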
void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

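/* A sketch of how these tables are consumed (hypothetical caller;
   the real driver is gdbserver's agent-expression compiler, and the
   field names below are illustrative):

     struct emit_ops *ops = x86_emit_ops ();
     ops->emit_prologue ();
     ops->emit_const (42);      // push 42 on the expression stack
     ops->emit_reg (0);         // push the value of raw register 0
     ops->emit_add ();          // add the two
     ops->emit_epilogue ();     // store result, return

   The ops are picked once per compilation, based on whether the
   current target description is 64-bit.  */
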
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
      || tdesc == tdesc_x32_linux)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_amd64_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_amd64_avx512_linux || tdesc == tdesc_x32_avx512_linux)
    return X86_TDESC_AVX512;
#endif

  if (tdesc == tdesc_i386_mmx_linux)
    return X86_TDESC_MMX;
  if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_i386_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_i386_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_i386_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_i386_avx512_linux)
    return X86_TDESC_AVX512;

  return 0;
}

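/* The X86_TDESC_* index returned above is, as the name suggests,
   shared with the in-process agent, so that gdbserver and the agent
   library can agree on which target description (and hence register
   layout) is in use without shipping the whole description across.  */
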
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();
  init_registers_amd64_avx_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();
  init_registers_i386_avx_mpx_linux ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}