Add x86_debug_reg_state to gdbserver
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
32d0add0 3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265
GB
26#include "x86-low.h"
27#include "x86-xstate.h"
d0722149
DE
28
29#include "gdb_proc_service.h"
b5737fa9
PA
30/* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32#ifndef ELFMAG0
33#include "elf/common.h"
34#endif
35
58b4daa5 36#include "agent.h"
3aee8918 37#include "tdesc.h"
c144c7a0 38#include "tracepoint.h"
f699aaba 39#include "ax.h"
7b669087 40#include "nat/linux-nat.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
01f9f808
MS
51/* Defined in auto-generated file amd64-avx512-linux.c. */
52void init_registers_amd64_avx512_linux (void);
53extern const struct target_desc *tdesc_amd64_avx512_linux;
54
a196ebeb
WT
55/* Defined in auto-generated file amd64-mpx-linux.c. */
56void init_registers_amd64_mpx_linux (void);
57extern const struct target_desc *tdesc_amd64_mpx_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-linux.c. */
60void init_registers_x32_linux (void);
3aee8918
PA
61extern const struct target_desc *tdesc_x32_linux;
62
4d47af5c
L
63/* Defined in auto-generated file x32-avx-linux.c. */
64void init_registers_x32_avx_linux (void);
3aee8918 65extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 66
01f9f808
MS
67/* Defined in auto-generated file x32-avx512-linux.c. */
68void init_registers_x32_avx512_linux (void);
69extern const struct target_desc *tdesc_x32_avx512_linux;
70
3aee8918
PA
71#endif
72
73/* Defined in auto-generated file i386-linux.c. */
74void init_registers_i386_linux (void);
75extern const struct target_desc *tdesc_i386_linux;
76
77/* Defined in auto-generated file i386-mmx-linux.c. */
78void init_registers_i386_mmx_linux (void);
79extern const struct target_desc *tdesc_i386_mmx_linux;
80
81/* Defined in auto-generated file i386-avx-linux.c. */
82void init_registers_i386_avx_linux (void);
83extern const struct target_desc *tdesc_i386_avx_linux;
84
01f9f808
MS
85/* Defined in auto-generated file i386-avx512-linux.c. */
86void init_registers_i386_avx512_linux (void);
87extern const struct target_desc *tdesc_i386_avx512_linux;
88
a196ebeb
WT
89/* Defined in auto-generated file i386-mpx-linux.c. */
90void init_registers_i386_mpx_linux (void);
91extern const struct target_desc *tdesc_i386_mpx_linux;
92
3aee8918
PA
93#ifdef __x86_64__
94static struct target_desc *tdesc_amd64_linux_no_xml;
95#endif
96static struct target_desc *tdesc_i386_linux_no_xml;
97
1570b33e 98
/* Instruction templates for relative jumps; the displacement bytes
   are zero here and are patched in at the point of use.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };	/* jmp rel32 */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };	/* jmp rel16 */
fa593d66 101
1570b33e
L
/* Backward compatibility for gdb without XML support.  These minimal
   target descriptions are handed out when the connected GDB cannot
   parse XML target descriptions.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
115
116#include <sys/reg.h>
117#include <sys/procfs.h>
118#include <sys/ptrace.h>
1570b33e
L
119#include <sys/uio.h>
120
121#ifndef PTRACE_GETREGSET
122#define PTRACE_GETREGSET 0x4204
123#endif
124
125#ifndef PTRACE_SETREGSET
126#define PTRACE_SETREGSET 0x4205
127#endif
128
d0722149
DE
129
130#ifndef PTRACE_GET_THREAD_AREA
131#define PTRACE_GET_THREAD_AREA 25
132#endif
133
134/* This definition comes from prctl.h, but some kernels may not have it. */
135#ifndef PTRACE_ARCH_PRCTL
136#define PTRACE_ARCH_PRCTL 30
137#endif
138
139/* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
141#ifndef ARCH_GET_FS
142#define ARCH_SET_GS 0x1001
143#define ARCH_SET_FS 0x1002
144#define ARCH_GET_FS 0x1003
145#define ARCH_GET_GS 0x1004
146#endif
147
aa5ca48f
DE
148/* Per-process arch-specific data we want to keep. */
149
150struct arch_process_info
151{
df7e5265 152 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
153};
154
155/* Per-thread arch-specific data we want to keep. */
156
157struct arch_lwp_info
158{
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed;
161};
162
d0722149
DE
163#ifdef __x86_64__
164
165/* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168static /*const*/ int i386_regmap[] =
169{
170 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
171 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
172 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
173 DS * 8, ES * 8, FS * 8, GS * 8
174};
175
176#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
177
178/* So code below doesn't have to care, i386 or amd64. */
179#define ORIG_EAX ORIG_RAX
bc9540e8 180#define REGSIZE 8
d0722149
DE
181
182static const int x86_64_regmap[] =
183{
184 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
185 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
186 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
187 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
188 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
189 DS * 8, ES * 8, FS * 8, GS * 8,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
193 -1,
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 ORIG_RAX * 8,
196 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
197 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
198 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
201 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1, -1, -1, -1, -1, -1, -1, -1
d0722149
DE
207};
208
209#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 210#define X86_64_USER_REGS (GS + 1)
d0722149
DE
211
212#else /* ! __x86_64__ */
213
214/* Mapping between the general-purpose registers in `struct user'
215 format and GDB's register array layout. */
216static /*const*/ int i386_regmap[] =
217{
218 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
219 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
220 EIP * 4, EFL * 4, CS * 4, SS * 4,
221 DS * 4, ES * 4, FS * 4, GS * 4
222};
223
224#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225
bc9540e8
PA
226#define REGSIZE 4
227
d0722149 228#endif
3aee8918
PA
229
230#ifdef __x86_64__
231
232/* Returns true if the current inferior belongs to a x86-64 process,
233 per the tdesc. */
234
235static int
236is_64bit_tdesc (void)
237{
0bfdf32f 238 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
239
240 return register_size (regcache->tdesc, 0) == 8;
241}
242
243#endif
244
d0722149
DE
245\f
246/* Called by libthread_db. */
247
248ps_err_e
249ps_get_thread_area (const struct ps_prochandle *ph,
250 lwpid_t lwpid, int idx, void **base)
251{
252#ifdef __x86_64__
3aee8918 253 int use_64bit = is_64bit_tdesc ();
d0722149
DE
254
255 if (use_64bit)
256 {
257 switch (idx)
258 {
259 case FS:
260 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
261 return PS_OK;
262 break;
263 case GS:
264 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
265 return PS_OK;
266 break;
267 default:
268 return PS_BADADDR;
269 }
270 return PS_ERR;
271 }
272#endif
273
274 {
275 unsigned int desc[4];
276
277 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
278 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
279 return PS_ERR;
280
d1ec4ce7
DE
281 /* Ensure we properly extend the value to 64-bits for x86_64. */
282 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
283 return PS_OK;
284 }
285}
fa593d66
PA
286
287/* Get the thread area address. This is used to recognize which
288 thread is which when tracing with the in-process agent library. We
289 don't read anything from the address, and treat it as opaque; it's
290 the address itself that we assume is unique per-thread. */
291
292static int
293x86_get_thread_area (int lwpid, CORE_ADDR *addr)
294{
295#ifdef __x86_64__
3aee8918 296 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
297
298 if (use_64bit)
299 {
300 void *base;
301 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
302 {
303 *addr = (CORE_ADDR) (uintptr_t) base;
304 return 0;
305 }
306
307 return -1;
308 }
309#endif
310
311 {
312 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
d86d4aaf
DE
313 struct thread_info *thr = get_lwp_thread (lwp);
314 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
315 unsigned int desc[4];
316 ULONGEST gs = 0;
317 const int reg_thread_area = 3; /* bits to scale down register value. */
318 int idx;
319
320 collect_register_by_name (regcache, "gs", &gs);
321
322 idx = gs >> reg_thread_area;
323
324 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 325 lwpid_of (thr),
493e2a69 326 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
327 return -1;
328
329 *addr = desc[1];
330 return 0;
331 }
332}
333
334
d0722149
DE
335\f
336static int
3aee8918 337x86_cannot_store_register (int regno)
d0722149 338{
3aee8918
PA
339#ifdef __x86_64__
340 if (is_64bit_tdesc ())
341 return 0;
342#endif
343
d0722149
DE
344 return regno >= I386_NUM_REGS;
345}
346
347static int
3aee8918 348x86_cannot_fetch_register (int regno)
d0722149 349{
3aee8918
PA
350#ifdef __x86_64__
351 if (is_64bit_tdesc ())
352 return 0;
353#endif
354
d0722149
DE
355 return regno >= I386_NUM_REGS;
356}
357
358static void
442ea881 359x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
360{
361 int i;
362
363#ifdef __x86_64__
3aee8918 364 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
365 {
366 for (i = 0; i < X86_64_NUM_REGS; i++)
367 if (x86_64_regmap[i] != -1)
442ea881 368 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
369 return;
370 }
9e0aa64f
JK
371
372 /* 32-bit inferior registers need to be zero-extended.
373 Callers would read uninitialized memory otherwise. */
374 memset (buf, 0x00, X86_64_USER_REGS * 8);
d0722149
DE
375#endif
376
377 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 378 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 379
442ea881 380 collect_register_by_name (regcache, "orig_eax",
bc9540e8 381 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
382}
383
384static void
442ea881 385x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
386{
387 int i;
388
389#ifdef __x86_64__
3aee8918 390 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
391 {
392 for (i = 0; i < X86_64_NUM_REGS; i++)
393 if (x86_64_regmap[i] != -1)
442ea881 394 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
395 return;
396 }
397#endif
398
399 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 400 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 401
442ea881 402 supply_register_by_name (regcache, "orig_eax",
bc9540e8 403 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
404}
405
/* Copy the FP registers from REGCACHE into BUF (FXSAVE layout on
   64-bit, FSAVE layout on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

/* Supply the FP registers in BUF to REGCACHE (layouts as above).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
425
#ifndef __x86_64__

/* Copy the extended FP registers from REGCACHE into BUF (FXSAVE
   layout); 32-bit only.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Supply the extended FP registers in BUF (FXSAVE layout) to
   REGCACHE; 32-bit only.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
441
1570b33e
L
/* Copy the XSAVE extended state from REGCACHE into BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Supply the XSAVE extended state in BUF to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
453
d0722149
DE
454/* ??? The non-biarch i386 case stores all the i387 regs twice.
455 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
456 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
457 doesn't work. IWBN to avoid the duplication in the case where it
458 does work. Maybe the arch_setup routine could check whether it works
3aee8918 459 and update the supported regsets accordingly. */
d0722149 460
3aee8918 461static struct regset_info x86_regsets[] =
d0722149
DE
462{
463#ifdef HAVE_PTRACE_GETREGS
1570b33e 464 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
465 GENERAL_REGS,
466 x86_fill_gregset, x86_store_gregset },
1570b33e
L
467 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
468 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
469# ifndef __x86_64__
470# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 471 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
472 EXTENDED_REGS,
473 x86_fill_fpxregset, x86_store_fpxregset },
474# endif
475# endif
1570b33e 476 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
477 FP_REGS,
478 x86_fill_fpregset, x86_store_fpregset },
479#endif /* HAVE_PTRACE_GETREGS */
1570b33e 480 { 0, 0, 0, -1, -1, NULL, NULL }
d0722149
DE
481};
482
483static CORE_ADDR
442ea881 484x86_get_pc (struct regcache *regcache)
d0722149 485{
3aee8918 486 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
487
488 if (use_64bit)
489 {
490 unsigned long pc;
442ea881 491 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
492 return (CORE_ADDR) pc;
493 }
494 else
495 {
496 unsigned int pc;
442ea881 497 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
498 return (CORE_ADDR) pc;
499 }
500}
501
502static void
442ea881 503x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 504{
3aee8918 505 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
506
507 if (use_64bit)
508 {
509 unsigned long newpc = pc;
442ea881 510 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
511 }
512 else
513 {
514 unsigned int newpc = pc;
442ea881 515 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
516 }
517}
518\f
519static const unsigned char x86_breakpoint[] = { 0xCC };
520#define x86_breakpoint_len 1
521
522static int
523x86_breakpoint_at (CORE_ADDR pc)
524{
525 unsigned char c;
526
fc7238bb 527 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
528 if (c == 0xCC)
529 return 1;
530
531 return 0;
532}
533\f
4180215b
PA
534
/* Return the byte offset of debug register REGNUM within the
   u_debugreg array of `struct user'.  */

static int
u_debugreg_offset (int regnum)
{
  size_t base = offsetof (struct user, u_debugreg);
  size_t regsize = sizeof (((struct user *) 0)->u_debugreg[0]);

  return base + regsize * regnum;
}
544
545
aa5ca48f
DE
546/* Support for debug registers. */
547
548static unsigned long
549x86_linux_dr_get (ptid_t ptid, int regnum)
550{
551 int tid;
552 unsigned long value;
553
554 tid = ptid_get_lwp (ptid);
555
556 errno = 0;
4180215b 557 value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
aa5ca48f
DE
558 if (errno != 0)
559 error ("Couldn't read debug register");
560
561 return value;
562}
563
564static void
565x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
566{
567 int tid;
568
569 tid = ptid_get_lwp (ptid);
570
571 errno = 0;
4180215b 572 ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
aa5ca48f
DE
573 if (errno != 0)
574 error ("Couldn't write debug register");
575}
576
964e4306
PA
577static int
578update_debug_registers_callback (struct inferior_list_entry *entry,
579 void *pid_p)
580{
d86d4aaf
DE
581 struct thread_info *thr = (struct thread_info *) entry;
582 struct lwp_info *lwp = get_thread_lwp (thr);
964e4306
PA
583 int pid = *(int *) pid_p;
584
585 /* Only update the threads of this process. */
d86d4aaf 586 if (pid_of (thr) == pid)
964e4306
PA
587 {
588 /* The actual update is done later just before resuming the lwp,
589 we just mark that the registers need updating. */
590 lwp->arch_private->debug_registers_changed = 1;
591
592 /* If the lwp isn't stopped, force it to momentarily pause, so
593 we can update its debug registers. */
594 if (!lwp->stopped)
595 linux_stop_lwp (lwp);
596 }
597
598 return 0;
599}
600
aa5ca48f
DE
601/* Update the inferior's debug register REGNUM from STATE. */
602
42995dbd 603static void
df7e5265 604x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
aa5ca48f 605{
964e4306 606 /* Only update the threads of this process. */
0bfdf32f 607 int pid = pid_of (current_thread);
aa5ca48f 608
f7160e97 609 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
aa5ca48f 610
d86d4aaf 611 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 612}
aa5ca48f 613
964e4306 614/* Return the inferior's debug register REGNUM. */
aa5ca48f 615
42995dbd 616static CORE_ADDR
df7e5265 617x86_dr_low_get_addr (int regnum)
964e4306 618{
0a5b1e09 619 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306 620
7b669087 621 return x86_linux_dr_get (current_lwp_ptid (), regnum);
aa5ca48f
DE
622}
623
624/* Update the inferior's DR7 debug control register from STATE. */
625
42995dbd 626static void
df7e5265 627x86_dr_low_set_control (unsigned long control)
aa5ca48f 628{
964e4306 629 /* Only update the threads of this process. */
0bfdf32f 630 int pid = pid_of (current_thread);
aa5ca48f 631
d86d4aaf 632 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 633}
aa5ca48f 634
964e4306
PA
635/* Return the inferior's DR7 debug control register. */
636
42995dbd 637static unsigned long
df7e5265 638x86_dr_low_get_control (void)
964e4306 639{
7b669087 640 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
aa5ca48f
DE
641}
642
643/* Get the value of the DR6 debug status register from the inferior
644 and record it in STATE. */
645
42995dbd 646static unsigned long
df7e5265 647x86_dr_low_get_status (void)
aa5ca48f 648{
7b669087 649 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
aa5ca48f 650}
42995dbd
GB
651
652/* Low-level function vector. */
df7e5265 653struct x86_dr_low_type x86_dr_low =
42995dbd 654 {
df7e5265
GB
655 x86_dr_low_set_control,
656 x86_dr_low_set_addr,
657 x86_dr_low_get_addr,
658 x86_dr_low_get_status,
659 x86_dr_low_get_control,
42995dbd
GB
660 sizeof (void *),
661 };
aa5ca48f 662\f
90d74c30 663/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
664
665static int
802e8e6d
PA
666x86_supports_z_point_type (char z_type)
667{
668 switch (z_type)
669 {
670 case Z_PACKET_SW_BP:
671 case Z_PACKET_HW_BP:
672 case Z_PACKET_WRITE_WP:
673 case Z_PACKET_ACCESS_WP:
674 return 1;
675 default:
676 return 0;
677 }
678}
679
680static int
681x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
682 int size, struct raw_breakpoint *bp)
aa5ca48f
DE
683{
684 struct process_info *proc = current_process ();
802e8e6d 685
aa5ca48f
DE
686 switch (type)
687 {
802e8e6d
PA
688 case raw_bkpt_type_sw:
689 return insert_memory_breakpoint (bp);
690
691 case raw_bkpt_type_hw:
692 case raw_bkpt_type_write_wp:
693 case raw_bkpt_type_access_wp:
a4165e94 694 {
802e8e6d
PA
695 enum target_hw_bp_type hw_type
696 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 697 struct x86_debug_reg_state *state
fe978cb0 698 = &proc->priv->arch_private->debug_reg_state;
a4165e94 699
df7e5265 700 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 701 }
961bd387 702
aa5ca48f
DE
703 default:
704 /* Unsupported. */
705 return 1;
706 }
707}
708
709static int
802e8e6d
PA
710x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
711 int size, struct raw_breakpoint *bp)
aa5ca48f
DE
712{
713 struct process_info *proc = current_process ();
802e8e6d 714
aa5ca48f
DE
715 switch (type)
716 {
802e8e6d
PA
717 case raw_bkpt_type_sw:
718 return remove_memory_breakpoint (bp);
719
720 case raw_bkpt_type_hw:
721 case raw_bkpt_type_write_wp:
722 case raw_bkpt_type_access_wp:
a4165e94 723 {
802e8e6d
PA
724 enum target_hw_bp_type hw_type
725 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 726 struct x86_debug_reg_state *state
fe978cb0 727 = &proc->priv->arch_private->debug_reg_state;
a4165e94 728
df7e5265 729 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 730 }
aa5ca48f
DE
731 default:
732 /* Unsupported. */
733 return 1;
734 }
735}
736
737static int
738x86_stopped_by_watchpoint (void)
739{
740 struct process_info *proc = current_process ();
fe978cb0 741 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
742}
743
744static CORE_ADDR
745x86_stopped_data_address (void)
746{
747 struct process_info *proc = current_process ();
748 CORE_ADDR addr;
fe978cb0 749 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 750 &addr))
aa5ca48f
DE
751 return addr;
752 return 0;
753}
754\f
755/* Called when a new process is created. */
756
757static struct arch_process_info *
758x86_linux_new_process (void)
759{
ed859da7 760 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 761
df7e5265 762 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
763
764 return info;
765}
766
767/* Called when a new thread is detected. */
768
769static struct arch_lwp_info *
770x86_linux_new_thread (void)
771{
ed859da7 772 struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);
aa5ca48f
DE
773
774 info->debug_registers_changed = 1;
775
776 return info;
777}
778
70a0bb6b
GB
779/* See nat/x86-dregs.h. */
780
781struct x86_debug_reg_state *
782x86_debug_reg_state (pid_t pid)
783{
784 struct process_info *proc = find_process_pid (pid);
785
786 return &proc->priv->arch_private->debug_reg_state;
787}
788
aa5ca48f
DE
789/* Called when resuming a thread.
790 If the debug regs have changed, update the thread's copies. */
791
792static void
793x86_linux_prepare_to_resume (struct lwp_info *lwp)
794{
d86d4aaf 795 ptid_t ptid = ptid_of (get_lwp_thread (lwp));
6210a125 796 int clear_status = 0;
b9a881c2 797
aa5ca48f
DE
798 if (lwp->arch_private->debug_registers_changed)
799 {
df7e5265 800 struct x86_debug_reg_state *state
70a0bb6b
GB
801 = x86_debug_reg_state (ptid_get_pid (ptid));
802 int i;
aa5ca48f 803
8e9db26e
PA
804 x86_linux_dr_set (ptid, DR_CONTROL, 0);
805
97ea6506 806 ALL_DEBUG_ADDRESS_REGISTERS (i)
6210a125
PA
807 if (state->dr_ref_count[i] > 0)
808 {
809 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
810
811 /* If we're setting a watchpoint, any change the inferior
812 had done itself to the debug registers needs to be
df7e5265 813 discarded, otherwise, x86_dr_stopped_data_address can
6210a125
PA
814 get confused. */
815 clear_status = 1;
816 }
aa5ca48f 817
8e9db26e
PA
818 if (state->dr_control_mirror != 0)
819 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
aa5ca48f
DE
820
821 lwp->arch_private->debug_registers_changed = 0;
822 }
b9a881c2 823
15c66dd6 824 if (clear_status || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
b9a881c2 825 x86_linux_dr_set (ptid, DR_STATUS, 0);
aa5ca48f
DE
826}
827\f
d0722149
DE
828/* When GDBSERVER is built as a 64-bit application on linux, the
829 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
830 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
831 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
832 conversion in-place ourselves. */
833
834/* These types below (compat_*) define a siginfo type that is layout
835 compatible with the siginfo type exported by the 32-bit userspace
836 support. */
837
838#ifdef __x86_64__
839
/* Types laid out to match the 32-bit userspace siginfo ABI.  */
typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

/* 32-bit layout-compatible siginfo; the union pad keeps the total
   size at 128 bytes, matching the kernel's exported layout.  */
typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

/* x32 layout-compatible siginfo: identical to the 32-bit layout
   except for the 64-bit clock_t fields in _sigchld.  */
typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
977
d0722149
DE
/* Short-hand accessors for the compat siginfo union members above.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
998
999static void
1000compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
1001{
1002 memset (to, 0, sizeof (*to));
1003
1004 to->si_signo = from->si_signo;
1005 to->si_errno = from->si_errno;
1006 to->si_code = from->si_code;
1007
b53a1623 1008 if (to->si_code == SI_TIMER)
d0722149 1009 {
b53a1623
PA
1010 to->cpt_si_timerid = from->si_timerid;
1011 to->cpt_si_overrun = from->si_overrun;
d0722149
DE
1012 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1013 }
1014 else if (to->si_code == SI_USER)
1015 {
1016 to->cpt_si_pid = from->si_pid;
1017 to->cpt_si_uid = from->si_uid;
1018 }
b53a1623 1019 else if (to->si_code < 0)
d0722149 1020 {
b53a1623
PA
1021 to->cpt_si_pid = from->si_pid;
1022 to->cpt_si_uid = from->si_uid;
d0722149
DE
1023 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1024 }
1025 else
1026 {
1027 switch (to->si_signo)
1028 {
1029 case SIGCHLD:
1030 to->cpt_si_pid = from->si_pid;
1031 to->cpt_si_uid = from->si_uid;
1032 to->cpt_si_status = from->si_status;
1033 to->cpt_si_utime = from->si_utime;
1034 to->cpt_si_stime = from->si_stime;
1035 break;
1036 case SIGILL:
1037 case SIGFPE:
1038 case SIGSEGV:
1039 case SIGBUS:
1040 to->cpt_si_addr = (intptr_t) from->si_addr;
1041 break;
1042 case SIGPOLL:
1043 to->cpt_si_band = from->si_band;
1044 to->cpt_si_fd = from->si_fd;
1045 break;
1046 default:
1047 to->cpt_si_pid = from->si_pid;
1048 to->cpt_si_uid = from->si_uid;
1049 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1050 break;
1051 }
1052 }
1053}
1054
1055static void
1056siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
1057{
1058 memset (to, 0, sizeof (*to));
1059
1060 to->si_signo = from->si_signo;
1061 to->si_errno = from->si_errno;
1062 to->si_code = from->si_code;
1063
b53a1623 1064 if (to->si_code == SI_TIMER)
d0722149 1065 {
b53a1623
PA
1066 to->si_timerid = from->cpt_si_timerid;
1067 to->si_overrun = from->cpt_si_overrun;
d0722149
DE
1068 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1069 }
1070 else if (to->si_code == SI_USER)
1071 {
1072 to->si_pid = from->cpt_si_pid;
1073 to->si_uid = from->cpt_si_uid;
1074 }
b53a1623 1075 else if (to->si_code < 0)
d0722149 1076 {
b53a1623
PA
1077 to->si_pid = from->cpt_si_pid;
1078 to->si_uid = from->cpt_si_uid;
d0722149
DE
1079 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1080 }
1081 else
1082 {
1083 switch (to->si_signo)
1084 {
1085 case SIGCHLD:
1086 to->si_pid = from->cpt_si_pid;
1087 to->si_uid = from->cpt_si_uid;
1088 to->si_status = from->cpt_si_status;
1089 to->si_utime = from->cpt_si_utime;
1090 to->si_stime = from->cpt_si_stime;
1091 break;
1092 case SIGILL:
1093 case SIGFPE:
1094 case SIGSEGV:
1095 case SIGBUS:
1096 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1097 break;
1098 case SIGPOLL:
1099 to->si_band = from->cpt_si_band;
1100 to->si_fd = from->cpt_si_fd;
1101 break;
1102 default:
1103 to->si_pid = from->cpt_si_pid;
1104 to->si_uid = from->cpt_si_uid;
1105 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1106 break;
1107 }
1108 }
1109}
1110
c92b5177
L
/* Convert the native siginfo FROM into the x32 compat layout TO.
   Field selection follows FROM's si_code/si_signo, matching the
   kernel's siginfo union; unused fields stay zeroed.  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: queued signal carrying a value payload.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1167
1168static void
1169siginfo_from_compat_x32_siginfo (siginfo_t *to,
1170 compat_x32_siginfo_t *from)
1171{
1172 memset (to, 0, sizeof (*to));
1173
1174 to->si_signo = from->si_signo;
1175 to->si_errno = from->si_errno;
1176 to->si_code = from->si_code;
1177
1178 if (to->si_code == SI_TIMER)
1179 {
1180 to->si_timerid = from->cpt_si_timerid;
1181 to->si_overrun = from->cpt_si_overrun;
1182 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1183 }
1184 else if (to->si_code == SI_USER)
1185 {
1186 to->si_pid = from->cpt_si_pid;
1187 to->si_uid = from->cpt_si_uid;
1188 }
1189 else if (to->si_code < 0)
1190 {
1191 to->si_pid = from->cpt_si_pid;
1192 to->si_uid = from->cpt_si_uid;
1193 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1194 }
1195 else
1196 {
1197 switch (to->si_signo)
1198 {
1199 case SIGCHLD:
1200 to->si_pid = from->cpt_si_pid;
1201 to->si_uid = from->cpt_si_uid;
1202 to->si_status = from->cpt_si_status;
1203 to->si_utime = from->cpt_si_utime;
1204 to->si_stime = from->cpt_si_stime;
1205 break;
1206 case SIGILL:
1207 case SIGFPE:
1208 case SIGSEGV:
1209 case SIGBUS:
1210 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1211 break;
1212 case SIGPOLL:
1213 to->si_band = from->cpt_si_band;
1214 to->si_fd = from->cpt_si_fd;
1215 break;
1216 default:
1217 to->si_pid = from->cpt_si_pid;
1218 to->si_uid = from->cpt_si_uid;
1219 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1220 break;
1221 }
1222 }
1223}
1224
d0722149
DE
1225#endif /* __x86_64__ */
1226
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      /* The compat struct must overlay the native one exactly for the
	 in-place conversion below to be safe.  */
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* 64-bit GDBserver debugging an x32 inferior: use the x32
	 compat layout.  */
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  /* Same layout on both sides; nothing to do.  */
  return 0;
}
1271\f
1570b33e
L
1272static int use_xml;
1273
3aee8918
PA
1274/* Format of XSAVE extended state is:
1275 struct
1276 {
1277 fxsave_bytes[0..463]
1278 sw_usable_bytes[464..511]
1279 xstate_hdr_bytes[512..575]
1280 avx_bytes[576..831]
1281 future_state etc
1282 };
1283
1284 Same memory layout will be used for the coredump NT_X86_XSTATE
1285 representing the XSAVE extended state registers.
1286
1287 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1288 extended state mask, which is the same as the extended control register
1289 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1290 together with the mask saved in the xstate_hdr_bytes to determine what
1291 states the processor/OS supports and what state, used or initialized,
1292 the process/thread is in. */
1293#define I386_LINUX_XSAVE_XCR0_OFFSET 464
1294
1295/* Does the current host support the GETFPXREGS request? The header
1296 file may or may not define it, and even if it is defined, the
1297 kernel will return EIO if it's running on a pre-SSE processor. */
1298int have_ptrace_getfpxregs =
1299#ifdef HAVE_PTRACE_GETFPXREGS
1300 -1
1301#else
1302 0
1303#endif
1304;
1570b33e 1305
3aee8918
PA
1306/* Does the current host support PTRACE_GETREGSET? */
1307static int have_ptrace_getregset = -1;
1308
/* Get Linux/x86 target description from running target.  Probes the
   kernel (PTRACE_GETFPXREGS / PTRACE_GETREGSET) the first time it is
   called and caches the results in have_ptrace_getfpxregs,
   have_ptrace_getregset and the static XCR0 value below; subsequent
   calls reuse the cached probe results.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;		/* Cached across calls once probed.  */
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit GDBserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* One-time probe: pre-SSE kernels/processors return EIO for
     PTRACE_GETFPXREGS; fall back to the MMX-only description.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	     regset to the kernel's XSAVE area and disable the
	     non-general regsets it supersedes.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  /* Native 64-bit inferior: pick the richest description the
	     enabled XCR0 feature bits allow.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  /* x32 inferior.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      /* 32-bit inferior.  */
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1473
1474/* Callback for find_inferior. Stops iteration when a thread with a
1475 given PID is found. */
1476
1477static int
1478same_process_callback (struct inferior_list_entry *entry, void *data)
1479{
1480 int pid = *(int *) data;
1481
1482 return (ptid_get_pid (entry->id) == pid);
1483}
1484
/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  Note: clobbers the global current_thread; callers
   (x86_linux_update_xmltarget) save and restore it.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process; arch_setup operates on the
     process of current_thread.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}
1500
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  /* The per-process callback reassigns current_thread, so save and
     restore it around the iteration.  */
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}
1518
1519/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1520 PTRACE_GETREGSET. */
1521
1522static void
1523x86_linux_process_qsupported (const char *query)
1524{
1525 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1526 with "i386" in qSupported query, it supports x86 XML target
1527 descriptions. */
1528 use_xml = 0;
61012eef 1529 if (query != NULL && startswith (query, "xmlRegisters="))
1570b33e
L
1530 {
1531 char *copy = xstrdup (query + 13);
1532 char *p;
1533
1534 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1535 {
1536 if (strcmp (p, "i386") == 0)
1537 {
1538 use_xml = 1;
1539 break;
1540 }
1541 }
1542
1543 free (copy);
1544 }
1545
1546 x86_linux_update_xmltarget ();
1547}
1548
3aee8918 1549/* Common for x86/x86-64. */
d0722149 1550
3aee8918
PA
1551static struct regsets_info x86_regsets_info =
1552 {
1553 x86_regsets, /* regsets */
1554 0, /* num_regsets */
1555 NULL, /* disabled_regsets */
1556 };
214d508e
L
1557
1558#ifdef __x86_64__
3aee8918
PA
1559static struct regs_info amd64_linux_regs_info =
1560 {
1561 NULL, /* regset_bitmap */
1562 NULL, /* usrregs_info */
1563 &x86_regsets_info
1564 };
d0722149 1565#endif
3aee8918
PA
1566static struct usrregs_info i386_linux_usrregs_info =
1567 {
1568 I386_NUM_REGS,
1569 i386_regmap,
1570 };
d0722149 1571
3aee8918
PA
1572static struct regs_info i386_linux_regs_info =
1573 {
1574 NULL, /* regset_bitmap */
1575 &i386_linux_usrregs_info,
1576 &x86_regsets_info
1577 };
d0722149 1578
3aee8918
PA
1579const struct regs_info *
1580x86_linux_regs_info (void)
1581{
1582#ifdef __x86_64__
1583 if (is_64bit_tdesc ())
1584 return &amd64_linux_regs_info;
1585 else
1586#endif
1587 return &i386_linux_regs_info;
1588}
d0722149 1589
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  Reads it from the running target (see
   x86_linux_read_description) and stores it on the current
   process.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1598
219f2f23
PA
/* Tracepoints are supported on this target; see the jump-pad builders
   below for fast tracepoint support.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1604
fa593d66
PA
/* Write the LEN bytes of BUF into the inferior at *TO, then advance
   *TO past them.  Used to emit jump-pad code incrementally.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1611
/* Decode OP, a string of whitespace-separated hex byte values
   (e.g. "48 83 ec 18"), into BUF.  Returns the number of bytes
   written.  Parsing stops at the first token that is not a hex
   number.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *cursor = op;

  for (;;)
    {
      char *tail;
      unsigned long byte = strtoul (cursor, &tail, 16);

      /* No progress means no more hex digits: done.  */
      if (tail == cursor)
	break;

      *out++ = byte;
      cursor = tail;
    }

  return out - buf;
}
1631
1632#ifdef __x86_64__
1633
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if either jump offset does not fit in 32 bits.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1820
1821#endif /* __x86_64__ */
1822
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  On i386, a 4-byte original instruction routes
   through a trampoline (16-bit jump); otherwise a 5-byte 32-bit
   relative jump is used.  Returns 0 on success, 1 (with a message in
   ERR) if no trampoline space is available.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state (reverse of the save sequence above).  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
2010
/* Dispatch jump-pad construction to the 64-bit or 32-bit builder
   depending on the inferior's target description.  Arguments are
   passed through unchanged; see the builders above for their
   meaning.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
2048
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  Returns 5 (32-bit relative jump), 4 (16-bit jump via
   trampoline) or 0 when the in-process agent has not loaded yet.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  /* Warn only once per GDBserver run.  */
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
2095
6a271cae
PA
/* Append LEN bytes from START to the instruction stream being
   compiled, at current_insn_ptr, and advance current_insn_ptr.
   Helper used by the EMIT_ASM machinery below.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
2108
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Emit the instructions INSNS into the inferior: labels bracketing
   the inline asm give the byte range, which add_insns copies out.
   The leading jmp skips over the embedded code so the host function
   itself never executes it.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble INSNS as 32-bit code (for i386
   inferiors of a 64-bit GDBserver).  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a 32-bit build, 32-bit emission is just EMIT_ASM.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2146
2147#ifdef __x86_64__
2148
/* amd64 bytecode emitters.  Calling convention of the compiled
   expression: the top of the agent-expression stack is cached in
   %rax; deeper entries live on the machine stack.  On entry to the
   compiled code, %rdi holds the raw-register block pointer and %rsi
   the result-value pointer (see amd64_emit_prologue).  */

static void
amd64_emit_prologue (void)
{
  /* Set up a frame and spill the two incoming arguments so later
     emitted code can reach them at -8(%rbp) and -16(%rbp).  */
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  /* Store the final top-of-stack (%rax) through the saved result
     pointer, return 0 for success, and tear down the frame.  */
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  /* %rax += next stack slot; pop the slot.  */
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  /* Result is (next slot) - %rax; new top ends up in %rax.  */
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply and shifts are not compiled; setting emit_error makes the
   agent expression fall back to interpretation.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend the low ARG bits of %rax to the full 64 bits.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  /* Logical NOT: %rax := (%rax == 0) ? 1 : 0.  */
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  /* One's complement via xor with all-ones.  */
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  /* %rax := ((next slot) == %rax); pop the slot.  */
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  /* %rax := ((next slot) < %rax), signed compare; pop the slot.  */
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  /* %rax := ((next slot) < %rax), unsigned compare; pop the slot.  */
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2318
/* Replace %rax (an address) with the SIZE-byte value it points at.
   Unsupported sizes emit nothing (the switch simply falls through).  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Pop the top of stack and emit a conditional branch taken when it
   was nonzero.  The jne rel32 displacement is left zero; the caller
   patches it later via amd64_write_goto_address, using *OFFSET_P
   (byte offset of the displacement field: 3 + 1 + 4 + 2 = 10) and
   *SIZE_P (displacement width).  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp rel32 with a zero displacement, to be
   patched later (displacement starts at byte offset 1).  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the SIZE-byte relative displacement at inferior address FROM
   so the branch lands at TO.  Only 4-byte displacements are
   supported.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2383
/* Load the 64-bit constant NUM into %rax (the cached top of stack)
   using a movabs instruction (0x48 0xb8 imm64).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2398
2399static void
2400amd64_emit_call (CORE_ADDR fn)
2401{
2402 unsigned char buf[16];
2403 int i;
2404 CORE_ADDR buildaddr;
4e29fb54 2405 LONGEST offset64;
6a271cae
PA
2406
2407 /* The destination function being in the shared library, may be
2408 >31-bits away off the compiled code pad. */
2409
2410 buildaddr = current_insn_ptr;
2411
2412 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2413
2414 i = 0;
2415
2416 if (offset64 > INT_MAX || offset64 < INT_MIN)
2417 {
2418 /* Offset is too large for a call. Use callq, but that requires
2419 a register, so avoid it if possible. Use r10, since it is
2420 call-clobbered, we don't have to push/pop it. */
2421 buf[i++] = 0x48; /* mov $fn,%r10 */
2422 buf[i++] = 0xba;
2423 memcpy (buf + i, &fn, 8);
2424 i += 8;
2425 buf[i++] = 0xff; /* callq *%r10 */
2426 buf[i++] = 0xd2;
2427 }
2428 else
2429 {
2430 int offset32 = offset64; /* we know we can't overflow here. */
2431 memcpy (buf + i, &offset32, 4);
2432 i += 4;
2433 }
2434
2435 append_insns (&buildaddr, i, buf);
2436 current_insn_ptr = buildaddr;
2437}
2438
/* Push the value of raw register REG: load the register number into
   %esi (second argument) and call the IPA's get-raw-reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Push the cached top of stack (%rax) onto the machine stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Zero-extend the low ARG bits of %rax to 64 bits.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Swap the top two stack entries (%rax and the top machine-stack
   slot).  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Drop N 8-byte entries from the machine stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.
     (n * 8 must fit in the signed 8-bit displacement emitted here.)  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  Call FN with the constant
   ARG1 in %edi; FN's return value (in %rax) becomes the new top of
   stack.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Call FN with ARG1 in
   %edi and the current top of stack in %rsi, preserving the top of
   stack across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2566
6b9801d4
SS
/* Fused compare-and-branch emitters.  Each compares the next stack
   slot against %rax, pops both operands on either path, and emits a
   jmp rel32 with a zero displacement to be patched later.  The
   displacement field starts at byte offset 13 (cmp 4 + jcc-rel8 2 +
   lea 5 + pop 1 + jmp opcode 1); hence *OFFSET_P = 13, *SIZE_P = 4.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    /* This label is unused; kept for symmetry with the i386
	       variants, where the taken path is reached by jump.  */
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2687
6a271cae
PA
/* Bytecode-compilation vtable for 64-bit inferiors; entries must stay
   in the order declared by struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2728
2729#endif /* __x86_64__ */
2730
/* i386 bytecode emitters.  Values on the agent-expression stack are
   64 bits wide and are kept as register pairs: the top of stack is
   cached in %eax (low half) / %ebx (high half); deeper entries live
   on the machine stack as two 4-byte slots.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  /* Store the 64-bit result through the value pointer, return 0, and
     restore %ebx/%ebp.  */
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  /* 64-bit add: low halves with carry into the high halves.  */
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  /* 64-bit subtract: (next entry) - (top); result popped into
     %eax/%ebx.  */
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

/* Multiply and shifts are not compiled on i386; flag an error so the
   expression is interpreted instead.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend the low ARG bits of the %eax/%ebx pair to 64 bits.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  /* Logical NOT of the 64-bit pair: 1 if both halves zero, else 0.  */
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  /* One's complement of both halves.  */
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  /* 64-bit equality: compare high halves, then low halves.  */
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  /* 64-bit signed less-than: decide on the high halves when they
     differ, otherwise on the low halves.  */
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  /* 64-bit unsigned less-than, same structure as the signed case.  */
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2927
/* Replace the address in %eax with the SIZE-byte value it points at;
   the 8-byte case fills the %eax/%ebx pair.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

/* Pop the 64-bit top of stack and branch when it was nonzero.  The
   jne rel32 displacement is patched later; it sits at byte offset 11
   (2 + 2 + 1 + 1 + 3 + 2) of the emitted sequence.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp rel32 with a zero displacement, to be
   patched later (displacement at byte offset 1).  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the SIZE-byte relative displacement at inferior address FROM
   so the branch lands at TO.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2999
/* Load the 64-bit constant NUM into the %eax/%ebx pair.  The high
   half load is skipped when it is zero (an xor is shorter).  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
3026
/* Emit a direct call to FN.  On i386 every address is within a
   32-bit relative displacement, so a single e8 call always works.  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
3042
/* Push the value of raw register REG: pass the raw-regs pointer
   (from 8(%ebp)) and the register number on the stack and call the
   IPA's get-raw-reg helper; zero the high half of the result.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

/* Push the cached %eax/%ebx pair onto the machine stack (high half
   first, so the low half ends up at the lower address).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

/* Zero-extend the low ARG bits of the %eax/%ebx pair.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

/* Swap the top two 64-bit stack entries.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

/* Drop N 64-bit (8-byte) entries from the machine stack; n * 8 must
   fit in the signed 8-bit displacement emitted here.  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
3136
/* FN's prototype is `LONGEST(*fn)(int)'.  Call FN with the constant
   ARG1 as its single stack argument; the 64-bit return value comes
   back in %eax/%edx and is moved into the %eax/%ebx pair.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Call FN with ARG1 and
   the current 64-bit top of stack as arguments, preserving the top
   of stack across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
3200
6b9801d4
SS
3201
/* Fused 64-bit compare-and-branch emitters for i386.  Each compares
   the next stack entry against the %eax/%ebx pair, pops both
   operands on either path, and emits a jmp rel32 with a zero
   displacement to be patched later.  *OFFSET_P gives the byte offset
   of the displacement field in the emitted sequence (18 for eq/ne,
   20 for the ordered compares, which have one extra rel8 branch);
   *SIZE_P is its width.  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3356
6a271cae
PA
/* Bytecode-compilation vtable for 32-bit inferiors; entries must stay
   in the order declared by struct emit_ops.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3397
3398
/* Return the emit-ops table matching the current inferior's word
   size (64-bit table only on an __x86_64__ build).  */

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3409
c2d6af84
PA
/* The x86 target always supports range stepping (vCont;r).  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3415
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   Positional initializer for struct linux_target_ops -- the order
   must match the declaration in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  /* NOTE(review): by position this looks like breakpoint_reinsert_addr
     (unused on x86) followed by decr_pc_after_break = 1 -- confirm
     against linux-low.h.  */
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
3aee8918
PA
3456
/* Register all x86 Linux target descriptions and regset info, and
   build the fallback "no XML" descriptions used when the debugger
   does not support XML target descriptions.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  /* Fallback description for debuggers without XML support.  */
  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();

  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.672238 seconds and 4 git commands to generate.