Introduce basic LWP accessors
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
32d0add0 3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265
GB
26#include "x86-low.h"
27#include "x86-xstate.h"
d0722149
DE
28
29#include "gdb_proc_service.h"
b5737fa9
PA
30/* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32#ifndef ELFMAG0
33#include "elf/common.h"
34#endif
35
58b4daa5 36#include "agent.h"
3aee8918 37#include "tdesc.h"
c144c7a0 38#include "tracepoint.h"
f699aaba 39#include "ax.h"
7b669087 40#include "nat/linux-nat.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
01f9f808
MS
51/* Defined in auto-generated file amd64-avx512-linux.c. */
52void init_registers_amd64_avx512_linux (void);
53extern const struct target_desc *tdesc_amd64_avx512_linux;
54
a196ebeb
WT
55/* Defined in auto-generated file amd64-mpx-linux.c. */
56void init_registers_amd64_mpx_linux (void);
57extern const struct target_desc *tdesc_amd64_mpx_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-linux.c. */
60void init_registers_x32_linux (void);
3aee8918
PA
61extern const struct target_desc *tdesc_x32_linux;
62
4d47af5c
L
63/* Defined in auto-generated file x32-avx-linux.c. */
64void init_registers_x32_avx_linux (void);
3aee8918 65extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 66
01f9f808
MS
67/* Defined in auto-generated file x32-avx512-linux.c. */
68void init_registers_x32_avx512_linux (void);
69extern const struct target_desc *tdesc_x32_avx512_linux;
70
3aee8918
PA
71#endif
72
73/* Defined in auto-generated file i386-linux.c. */
74void init_registers_i386_linux (void);
75extern const struct target_desc *tdesc_i386_linux;
76
77/* Defined in auto-generated file i386-mmx-linux.c. */
78void init_registers_i386_mmx_linux (void);
79extern const struct target_desc *tdesc_i386_mmx_linux;
80
81/* Defined in auto-generated file i386-avx-linux.c. */
82void init_registers_i386_avx_linux (void);
83extern const struct target_desc *tdesc_i386_avx_linux;
84
01f9f808
MS
85/* Defined in auto-generated file i386-avx512-linux.c. */
86void init_registers_i386_avx512_linux (void);
87extern const struct target_desc *tdesc_i386_avx512_linux;
88
a196ebeb
WT
89/* Defined in auto-generated file i386-mpx-linux.c. */
90void init_registers_i386_mpx_linux (void);
91extern const struct target_desc *tdesc_i386_mpx_linux;
92
3aee8918
PA
93#ifdef __x86_64__
94static struct target_desc *tdesc_amd64_linux_no_xml;
95#endif
96static struct target_desc *tdesc_i386_linux_no_xml;
97
1570b33e 98
fa593d66 99static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 100static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 101
1570b33e
L
102/* Backward compatibility for gdb without XML support. */
103
104static const char *xmltarget_i386_linux_no_xml = "@<target>\
105<architecture>i386</architecture>\
106<osabi>GNU/Linux</osabi>\
107</target>";
f6d1620c
L
108
109#ifdef __x86_64__
1570b33e
L
110static const char *xmltarget_amd64_linux_no_xml = "@<target>\
111<architecture>i386:x86-64</architecture>\
112<osabi>GNU/Linux</osabi>\
113</target>";
f6d1620c 114#endif
d0722149
DE
115
116#include <sys/reg.h>
117#include <sys/procfs.h>
118#include <sys/ptrace.h>
1570b33e
L
119#include <sys/uio.h>
120
121#ifndef PTRACE_GETREGSET
122#define PTRACE_GETREGSET 0x4204
123#endif
124
125#ifndef PTRACE_SETREGSET
126#define PTRACE_SETREGSET 0x4205
127#endif
128
d0722149
DE
129
130#ifndef PTRACE_GET_THREAD_AREA
131#define PTRACE_GET_THREAD_AREA 25
132#endif
133
134/* This definition comes from prctl.h, but some kernels may not have it. */
135#ifndef PTRACE_ARCH_PRCTL
136#define PTRACE_ARCH_PRCTL 30
137#endif
138
139/* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
141#ifndef ARCH_GET_FS
142#define ARCH_SET_GS 0x1001
143#define ARCH_SET_FS 0x1002
144#define ARCH_GET_FS 0x1003
145#define ARCH_GET_GS 0x1004
146#endif
147
aa5ca48f
DE
148/* Per-process arch-specific data we want to keep. */
149
150struct arch_process_info
151{
df7e5265 152 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
153};
154
155/* Per-thread arch-specific data we want to keep. */
156
157struct arch_lwp_info
158{
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed;
161};
162
d0722149
DE
163#ifdef __x86_64__
164
165/* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168static /*const*/ int i386_regmap[] =
169{
170 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
171 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
172 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
173 DS * 8, ES * 8, FS * 8, GS * 8
174};
175
176#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
177
178/* So code below doesn't have to care, i386 or amd64. */
179#define ORIG_EAX ORIG_RAX
bc9540e8 180#define REGSIZE 8
d0722149
DE
181
182static const int x86_64_regmap[] =
183{
184 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
185 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
186 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
187 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
188 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
189 DS * 8, ES * 8, FS * 8, GS * 8,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
193 -1,
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 ORIG_RAX * 8,
196 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
197 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
198 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
201 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1, -1, -1, -1, -1, -1, -1, -1
d0722149
DE
207};
208
209#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 210#define X86_64_USER_REGS (GS + 1)
d0722149
DE
211
212#else /* ! __x86_64__ */
213
214/* Mapping between the general-purpose registers in `struct user'
215 format and GDB's register array layout. */
216static /*const*/ int i386_regmap[] =
217{
218 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
219 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
220 EIP * 4, EFL * 4, CS * 4, SS * 4,
221 DS * 4, ES * 4, FS * 4, GS * 4
222};
223
224#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225
bc9540e8
PA
226#define REGSIZE 4
227
d0722149 228#endif
3aee8918
PA
229
230#ifdef __x86_64__
231
232/* Returns true if the current inferior belongs to a x86-64 process,
233 per the tdesc. */
234
static int
is_64bit_tdesc (void)
{
  /* Width of register 0 in the current tdesc distinguishes an amd64
     inferior (8 bytes) from an i386 one (4 bytes).  */
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
242
243#endif
244
d0722149
DE
245\f
246/* Called by libthread_db. */
247
ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: IDX names the segment register; fetch its
	 base directly with PTRACE_ARCH_PRCTL.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit inferior: IDX is a GDT entry index; read the descriptor
       and return its base word (desc[1]).  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
286
287/* Get the thread area address. This is used to recognize which
288 thread is which when tracing with the in-process agent library. We
289 don't read anything from the address, and treat it as opaque; it's
290 the address itself that we assume is unique per-thread. */
291
static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      /* 64-bit inferior: the thread area base is the FS base.  */
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* 32-bit inferior: %gs holds a segment selector; shifting off the
       low 3 bits (RPL/TI) yields the GDT entry index.  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the descriptor's base-address word.  */
    *addr = desc[1];
    return 0;
  }
}
333
334
d0722149
DE
335\f
336static int
3aee8918 337x86_cannot_store_register (int regno)
d0722149 338{
3aee8918
PA
339#ifdef __x86_64__
340 if (is_64bit_tdesc ())
341 return 0;
342#endif
343
d0722149
DE
344 return regno >= I386_NUM_REGS;
345}
346
347static int
3aee8918 348x86_cannot_fetch_register (int regno)
d0722149 349{
3aee8918
PA
350#ifdef __x86_64__
351 if (is_64bit_tdesc ())
352 return 0;
353#endif
354
d0722149
DE
355 return regno >= I386_NUM_REGS;
356}
357
/* Fill BUF, a general-purpose register buffer in `struct user'
   layout, from the values held in REGCACHE.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: use the amd64 register map; slots marked -1
	 have no corresponding ptrace register.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise. */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in the regmap; copy it by name.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
383
/* Supply the general-purpose register values from BUF (in `struct
   user' layout) back into REGCACHE.  Inverse of x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: slots marked -1 have no ptrace register.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in the regmap; copy it by name.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
405
/* Fill BUF with the FP registers from REGCACHE, using the fxsave
   layout on 64-bit hosts and the older fsave layout on 32-bit.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
415
/* Supply the FP registers in BUF (fxsave layout on 64-bit, fsave on
   32-bit) back into REGCACHE.  Inverse of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
425
426#ifndef __x86_64__
427
/* i386 only: fill BUF with the FP/SSE registers in fxsave layout
   (for PTRACE_SETFPXREGS).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
433
/* i386 only: supply the fxsave-layout FP/SSE registers in BUF back
   into REGCACHE (from PTRACE_GETFPXREGS).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
439
440#endif
441
1570b33e
L
/* Fill BUF with the extended (xsave) state from REGCACHE, for
   PTRACE_SETREGSET with NT_X86_XSTATE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
447
/* Supply the extended (xsave) state in BUF back into REGCACHE, from
   PTRACE_GETREGSET with NT_X86_XSTATE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
453
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly. */

/* Table of register sets fetched/stored via ptrace, tried in order.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* XSAVE extended state; size 0 means it is determined at runtime.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  /* Terminator.  */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
482
483static CORE_ADDR
442ea881 484x86_get_pc (struct regcache *regcache)
d0722149 485{
3aee8918 486 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
487
488 if (use_64bit)
489 {
490 unsigned long pc;
442ea881 491 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
492 return (CORE_ADDR) pc;
493 }
494 else
495 {
496 unsigned int pc;
442ea881 497 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
498 return (CORE_ADDR) pc;
499 }
500}
501
502static void
442ea881 503x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 504{
3aee8918 505 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
506
507 if (use_64bit)
508 {
509 unsigned long newpc = pc;
442ea881 510 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
511 }
512 else
513 {
514 unsigned int newpc = pc;
442ea881 515 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
516 }
517}
518\f
519static const unsigned char x86_breakpoint[] = { 0xCC };
520#define x86_breakpoint_len 1
521
522static int
523x86_breakpoint_at (CORE_ADDR pc)
524{
525 unsigned char c;
526
fc7238bb 527 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
528 if (c == 0xCC)
529 return 1;
530
531 return 0;
532}
533\f
4180215b
PA
534
535/* Return the offset of REGNUM in the u_debugreg field of struct
536 user. */
537
/* Return the byte offset of debug register REGNUM within the
   u_debugreg member of `struct user', suitable for
   PTRACE_PEEKUSER/PTRACE_POKEUSER.  */

static int
u_debugreg_offset (int regnum)
{
  size_t base = offsetof (struct user, u_debugreg);
  size_t slot = sizeof (((struct user *) 0)->u_debugreg[0]);

  return base + slot * regnum;
}
544
545
aa5ca48f
DE
546/* Support for debug registers. */
547
/* Read debug register REGNUM of the LWP identified by PTID and
   return its value.  Calls error () if the ptrace read fails.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  /* PTRACE_PEEKUSER returns the value in-band, so failure can only
     be detected through errno.  */
  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
563
/* Write VALUE into debug register REGNUM of the LWP identified by
   PTID.  Calls error () if the ptrace write fails.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  /* Failure of PTRACE_POKEUSER is detected through errno.  */
  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
576
/* Callback for iterate_over_lwps.  Mark LWP as needing its debug
   registers rewritten, pausing it if necessary.  ARG is unused.  */

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  /* The actual update is done later just before resuming the lwp,
     we just mark that the registers need updating.  */
  lwp->arch_private->debug_registers_changed = 1;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  /* Keep iterating over the remaining LWPs.  */
  return 0;
}
591
aa5ca48f
DE
592/* Update the inferior's debug register REGNUM from STATE. */
593
static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  /* Only DR0..DR3 hold addresses.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  /* ADDR is not written here; each LWP picks up the new value from
     the mirror when it is next resumed.  */
  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}
aa5ca48f 604
964e4306 605/* Return the inferior's debug register REGNUM. */
aa5ca48f 606
static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  /* Only DR0..DR3 hold addresses.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (current_lwp_ptid (), regnum);
}
614
615/* Update the inferior's DR7 debug control register from STATE. */
616
static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  /* CONTROL is not written here; each LWP reads the mirrored DR7
     value when it is next resumed.  */
  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}
aa5ca48f 625
964e4306
PA
626/* Return the inferior's DR7 debug control register. */
627
static unsigned long
x86_dr_low_get_control (void)
{
  /* DR_CONTROL names DR7, the debug control register.  */
  return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
}
633
634/* Get the value of the DR6 debug status register from the inferior
635 and record it in STATE. */
636
static unsigned long
x86_dr_low_get_status (void)
{
  /* DR_STATUS names DR6, which records the triggering condition.  */
  return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
}
42995dbd
GB
642
/* Low-level function vector.  Hooks this file's debug-register
   accessors into the shared x86 debug-register code.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    /* Size of a debug register: a host pointer.  */
    sizeof (void *),
  };
aa5ca48f 653\f
90d74c30 654/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
655
656static int
802e8e6d
PA
657x86_supports_z_point_type (char z_type)
658{
659 switch (z_type)
660 {
661 case Z_PACKET_SW_BP:
662 case Z_PACKET_HW_BP:
663 case Z_PACKET_WRITE_WP:
664 case Z_PACKET_ACCESS_WP:
665 return 1;
666 default:
667 return 0;
668 }
669}
670
/* Insert a breakpoint or watchpoint of TYPE at ADDR covering SIZE
   bytes.  Returns 0 on success, non-zero on failure or if TYPE is
   unsupported.  */

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      /* Software breakpoint: patch the trap opcode into memory.  */
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	/* Hardware break/watchpoints go through the per-process debug
	   register mirror; threads are updated on resume.  */
	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
699
/* Remove a breakpoint or watchpoint of TYPE at ADDR covering SIZE
   bytes.  Returns 0 on success, non-zero on failure or if TYPE is
   unsupported.  Mirror of x86_insert_point.  */

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      /* Software breakpoint: restore the original opcode.  */
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	/* Drop the entry from the debug register mirror; threads are
	   updated on resume.  */
	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
727
/* Return non-zero if the current inferior stopped because a
   watchpoint triggered, per the mirrored DR6 state.  */

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}
734
/* Return the data address whose access triggered the watchpoint
   stop, or 0 if it cannot be determined.  */

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
745\f
746/* Called when a new process is created. */
747
748static struct arch_process_info *
749x86_linux_new_process (void)
750{
ed859da7 751 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 752
df7e5265 753 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
754
755 return info;
756}
757
758/* Called when a new thread is detected. */
759
760static struct arch_lwp_info *
761x86_linux_new_thread (void)
762{
ed859da7 763 struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);
aa5ca48f
DE
764
765 info->debug_registers_changed = 1;
766
767 return info;
768}
769
70a0bb6b
GB
770/* See nat/x86-dregs.h. */
771
struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  /* The debug-register mirror is kept per-process in the process's
     arch-private data.  */
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
779
aa5ca48f
DE
780/* Called when resuming a thread.
781 If the debug regs have changed, update the thread's copies. */
782
static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of_lwp (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      struct x86_debug_reg_state *state
	= x86_debug_reg_state (ptid_get_pid (ptid));
      int i;

      /* Disable all watchpoints while the address registers are
	 rewritten, to avoid spurious hits on stale values.  */
      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, x86_dr_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      /* Re-enable via the mirrored DR7 only after the address
	 registers are in place.  */
      if (state->dr_control_mirror != 0)
	x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  /* Also clear DR6 when resuming past a watchpoint stop, so the next
     stop reports fresh status.  */
  if (clear_status
      || lwp_stop_reason (lwp) == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
819\f
d0722149
DE
820/* When GDBSERVER is built as a 64-bit application on linux, the
821 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
822 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
823 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
824 conversion in-place ourselves. */
825
826/* These types below (compat_*) define a siginfo type that is layout
827 compatible with the siginfo type exported by the 32-bit userspace
828 support. */
829
830#ifdef __x86_64__
831
832typedef int compat_int_t;
833typedef unsigned int compat_uptr_t;
834
835typedef int compat_time_t;
836typedef int compat_timer_t;
837typedef int compat_clock_t;
838
839struct compat_timeval
840{
841 compat_time_t tv_sec;
842 int tv_usec;
843};
844
845typedef union compat_sigval
846{
847 compat_int_t sival_int;
848 compat_uptr_t sival_ptr;
849} compat_sigval_t;
850
851typedef struct compat_siginfo
852{
853 int si_signo;
854 int si_errno;
855 int si_code;
856
857 union
858 {
859 int _pad[((128 / sizeof (int)) - 3)];
860
861 /* kill() */
862 struct
863 {
864 unsigned int _pid;
865 unsigned int _uid;
866 } _kill;
867
868 /* POSIX.1b timers */
869 struct
870 {
871 compat_timer_t _tid;
872 int _overrun;
873 compat_sigval_t _sigval;
874 } _timer;
875
876 /* POSIX.1b signals */
877 struct
878 {
879 unsigned int _pid;
880 unsigned int _uid;
881 compat_sigval_t _sigval;
882 } _rt;
883
884 /* SIGCHLD */
885 struct
886 {
887 unsigned int _pid;
888 unsigned int _uid;
889 int _status;
890 compat_clock_t _utime;
891 compat_clock_t _stime;
892 } _sigchld;
893
894 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
895 struct
896 {
897 unsigned int _addr;
898 } _sigfault;
899
900 /* SIGPOLL */
901 struct
902 {
903 int _band;
904 int _fd;
905 } _sigpoll;
906 } _sifields;
907} compat_siginfo_t;
908
c92b5177
L
909/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
910typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
911
912typedef struct compat_x32_siginfo
913{
914 int si_signo;
915 int si_errno;
916 int si_code;
917
918 union
919 {
920 int _pad[((128 / sizeof (int)) - 3)];
921
922 /* kill() */
923 struct
924 {
925 unsigned int _pid;
926 unsigned int _uid;
927 } _kill;
928
929 /* POSIX.1b timers */
930 struct
931 {
932 compat_timer_t _tid;
933 int _overrun;
934 compat_sigval_t _sigval;
935 } _timer;
936
937 /* POSIX.1b signals */
938 struct
939 {
940 unsigned int _pid;
941 unsigned int _uid;
942 compat_sigval_t _sigval;
943 } _rt;
944
945 /* SIGCHLD */
946 struct
947 {
948 unsigned int _pid;
949 unsigned int _uid;
950 int _status;
951 compat_x32_clock_t _utime;
952 compat_x32_clock_t _stime;
953 } _sigchld;
954
955 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
956 struct
957 {
958 unsigned int _addr;
959 } _sigfault;
960
961 /* SIGPOLL */
962 struct
963 {
964 int _band;
965 int _fd;
966 } _sigpoll;
967 } _sifields;
968} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
969
d0722149
DE
970#define cpt_si_pid _sifields._kill._pid
971#define cpt_si_uid _sifields._kill._uid
972#define cpt_si_timerid _sifields._timer._tid
973#define cpt_si_overrun _sifields._timer._overrun
974#define cpt_si_status _sifields._sigchld._status
975#define cpt_si_utime _sifields._sigchld._utime
976#define cpt_si_stime _sifields._sigchld._stime
977#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
978#define cpt_si_addr _sifields._sigfault._addr
979#define cpt_si_band _sifields._sigpoll._band
980#define cpt_si_fd _sifields._sigpoll._fd
981
982/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
983 In their place is si_timer1,si_timer2. */
984#ifndef si_timerid
985#define si_timerid si_timer1
986#endif
987#ifndef si_overrun
988#define si_overrun si_timer2
989#endif
990
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO, so a 32-bit inferior debugged by a 64-bit gdbserver
   sees the same bits a 32-bit debugger would.  Which union member is
   live follows si_code/si_signo, mirroring the kernel's rules.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: queued signal (e.g. sigqueue); carries
	 pid/uid and a value.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Fault signals carry the faulting address.  */
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1046
/* Convert the 32-bit compat siginfo FROM back into the native 64-bit
   siginfo TO.  Inverse of compat_siginfo_from_siginfo; the live union
   member again follows si_code/si_signo.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: queued signal; carries pid/uid and a
	 value.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Fault signals carry the faulting address.  */
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1102
c92b5177
L
/* Convert the native siginfo FROM into the x32 layout TO.  Same
   structure as compat_siginfo_from_siginfo; the x32 type differs in
   that SIGCHLD's _utime/_stime use the 64-bit-but-4-aligned
   compat_x32_clock_t.  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: queued signal; carries pid/uid and a
	 value.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Fault signals carry the faulting address.  */
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1159
1160static void
1161siginfo_from_compat_x32_siginfo (siginfo_t *to,
1162 compat_x32_siginfo_t *from)
1163{
1164 memset (to, 0, sizeof (*to));
1165
1166 to->si_signo = from->si_signo;
1167 to->si_errno = from->si_errno;
1168 to->si_code = from->si_code;
1169
1170 if (to->si_code == SI_TIMER)
1171 {
1172 to->si_timerid = from->cpt_si_timerid;
1173 to->si_overrun = from->cpt_si_overrun;
1174 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1175 }
1176 else if (to->si_code == SI_USER)
1177 {
1178 to->si_pid = from->cpt_si_pid;
1179 to->si_uid = from->cpt_si_uid;
1180 }
1181 else if (to->si_code < 0)
1182 {
1183 to->si_pid = from->cpt_si_pid;
1184 to->si_uid = from->cpt_si_uid;
1185 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1186 }
1187 else
1188 {
1189 switch (to->si_signo)
1190 {
1191 case SIGCHLD:
1192 to->si_pid = from->cpt_si_pid;
1193 to->si_uid = from->cpt_si_uid;
1194 to->si_status = from->cpt_si_status;
1195 to->si_utime = from->cpt_si_utime;
1196 to->si_stime = from->cpt_si_stime;
1197 break;
1198 case SIGILL:
1199 case SIGFPE:
1200 case SIGSEGV:
1201 case SIGBUS:
1202 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1203 break;
1204 case SIGPOLL:
1205 to->si_band = from->cpt_si_band;
1206 to->si_fd = from->cpt_si_fd;
1207 break;
1208 default:
1209 to->si_pid = from->cpt_si_pid;
1210 to->si_uid = from->cpt_si_uid;
1211 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1212 break;
1213 }
1214 }
1215}
1216
d0722149
DE
1217#endif /* __x86_64__ */
1218
1219/* Convert a native/host siginfo object, into/from the siginfo in the
1220 layout of the inferiors' architecture. Returns true if any
1221 conversion was done; false otherwise. If DIRECTION is 1, then copy
1222 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1223 INF. */
1224
static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      /* The conversions assume both layouts occupy the same space;
	 guard against a mismatched compat_siginfo_t definition.  */
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* 64-bit gdbserver debugging an x32 inferior: use the x32
	 compat layout.  */
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  /* Layouts match (or not a biarch build); no conversion done.  */
  return 0;
}
1263\f
1570b33e
L
/* Nonzero if the connected GDB understands x86 XML target
   descriptions; set from the "xmlRegisters=" qSupported feature in
   x86_linux_process_qsupported.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet"; probed in x86_linux_read_description.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means "not
   probed yet"; probed in x86_linux_read_description.  */
static int have_ptrace_getregset = -1;
1300
1301/* Get Linux/x86 target description from running target. */
1302
static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* XCR0 value read from the inferior's XSAVE area; static so the
     (one-shot) PTRACE_GETREGSET probe result is kept across calls.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe PTRACE_GETFPXREGS once; EIO here means a pre-SSE processor,
     for which only the MMX description applies.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  /* GDB does not understand XML descriptions: fall back to the
     fixed no-XML descriptions and an SSE-only xcr0.  */
  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* Probe PTRACE_GETREGSET once, and read XCR0 from the XSAVE area
     while at it.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	     regset to match XCR0, and disable the other non-GP
	     regsets (size 0).  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  /* Native amd64: pick the description matching the enabled
	     XSAVE features.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  /* x32 inferior.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      /* 32-bit i386 inferior.  */
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1465
1466/* Callback for find_inferior. Stops iteration when a thread with a
1467 given PID is found. */
1468
1469static int
1470same_process_callback (struct inferior_list_entry *entry, void *data)
1471{
1472 int pid = *(int *) data;
1473
1474 return (ptid_get_pid (entry->id) == pid);
1475}
1476
1477/* Callback for for_each_inferior. Calls the arch_setup routine for
1478 each process. */
1479
1480static void
1481x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1482{
1483 int pid = ptid_get_pid (entry->id);
1484
1485 /* Look up any thread of this processes. */
0bfdf32f 1486 current_thread
3aee8918
PA
1487 = (struct thread_info *) find_inferior (&all_threads,
1488 same_process_callback, &pid);
1489
1490 the_low_target.arch_setup ();
1491}
1492
1493/* Update all the target description of all processes; a new GDB
1494 connected, and it may or not support xml target descriptions. */
1495
1496static void
1497x86_linux_update_xmltarget (void)
1498{
0bfdf32f 1499 struct thread_info *saved_thread = current_thread;
3aee8918
PA
1500
1501 /* Before changing the register cache's internal layout, flush the
1502 contents of the current valid caches back to the threads, and
1503 release the current regcache objects. */
1504 regcache_release ();
1505
1506 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1507
0bfdf32f 1508 current_thread = saved_thread;
1570b33e
L
1509}
1510
1511/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1512 PTRACE_GETREGSET. */
1513
1514static void
1515x86_linux_process_qsupported (const char *query)
1516{
1517 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1518 with "i386" in qSupported query, it supports x86 XML target
1519 descriptions. */
1520 use_xml = 0;
61012eef 1521 if (query != NULL && startswith (query, "xmlRegisters="))
1570b33e
L
1522 {
1523 char *copy = xstrdup (query + 13);
1524 char *p;
1525
1526 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1527 {
1528 if (strcmp (p, "i386") == 0)
1529 {
1530 use_xml = 1;
1531 break;
1532 }
1533 }
1534
1535 free (copy);
1536 }
1537
1538 x86_linux_update_xmltarget ();
1539}
1540
3aee8918 1541/* Common for x86/x86-64. */
d0722149 1542
3aee8918
PA
/* Regset table shared by the 32-bit and 64-bit configurations;
   num_regsets is filled in at initialization (starts at 0).  */
static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };
214d508e
L
1549
#ifdef __x86_64__
/* amd64 register access goes exclusively through regsets; there is no
   usrregs (PTRACE_PEEKUSER) fallback here.  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
3aee8918
PA
/* PTRACE_PEEKUSER/POKEUSER layout for 32-bit i386, mapping each of the
   I386_NUM_REGS registers through i386_regmap.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };
d0722149 1563
3aee8918
PA
/* i386 register access: regsets, with a usrregs fallback.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1570
3aee8918
PA
1571const struct regs_info *
1572x86_linux_regs_info (void)
1573{
1574#ifdef __x86_64__
1575 if (is_64bit_tdesc ())
1576 return &amd64_linux_regs_info;
1577 else
1578#endif
1579 return &i386_linux_regs_info;
1580}
d0722149 1581
3aee8918
PA
1582/* Initialize the target description for the architecture of the
1583 inferior. */
1570b33e 1584
3aee8918
PA
static void
x86_arch_setup (void)
{
  /* Probe the running inferior and install the matching target
     description on the current process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1590
219f2f23
PA
/* Tracepoints are unconditionally supported on x86/x86-64.  */
static int
x86_supports_tracepoints (void)
{
  return 1;
}
1596
fa593d66
PA
/* Write the LEN bytes at BUF into the inferior at *TO, then advance
   *TO past what was written.  */
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1603
/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 83 ec 18", into BUF.  Stops at the first token that is not a
   hex number.  Returns the number of bytes stored.  */
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *start = buf;
  char *end;

  for (;;)
    {
      unsigned long byte = strtoul (op, &end, 16);

      /* No progress means no more hex tokens.  */
      if (end == op)
	break;

      *buf++ = byte;
      op = end;
    }

  return buf - start;
}
1623
1624#ifdef __x86_64__
1625
1626/* Build a jump pad that saves registers and calls a collection
1627 function. Writes a jump instruction to the jump pad to
1628 JJUMPAD_INSN. The caller is responsible to write it in at the
1629 tracepoint address. */
1630
/* amd64 flavor of the jump-pad builder.  Emits, at *JUMP_ENTRY:
   register save, spin-lock acquire, call to the in-process agent's
   collector, spin-lock release, register restore, the relocated
   original instruction, and a jump back to TPADDR+ORIG_SIZE.  Returns
   0 on success; on failure returns 1 with an "E." message in ERR.  */
static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      /* The 5-byte jump only carries a 32-bit displacement.  */
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1812
1813#endif /* __x86_64__ */
1814
1815/* Build a jump pad that saves registers and calls a collection
1816 function. Writes a jump instruction to the jump pad to
1817 JJUMPAD_INSN. The caller is responsible to write it in at the
1818 tracepoint address. */
1819
/* i386 flavor of the jump-pad builder; same contract as the amd64
   version.  Returns 0 on success; 1 with an "E." message in ERR when
   no trampoline space is available for a 4-byte instruction.  */
static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Unlike amd64, the collector is reached with a direct relative
     call here.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state (reverse of the save sequence above).  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
2002
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 builder based on the current target description.  Returns 0
   on success, nonzero with an error message in ERR otherwise.  */
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
2040
2041/* Return the minimum instruction length for fast tracepoints on x86/x86-64
2042 architectures. */
2043
2044static int
2045x86_get_min_fast_tracepoint_insn_len (void)
2046{
2047 static int warned_about_fast_tracepoints = 0;
2048
2049#ifdef __x86_64__
2050 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2051 used for fast tracepoints. */
3aee8918 2052 if (is_64bit_tdesc ())
405f8e94
SS
2053 return 5;
2054#endif
2055
58b4daa5 2056 if (agent_loaded_p ())
405f8e94
SS
2057 {
2058 char errbuf[IPA_BUFSIZ];
2059
2060 errbuf[0] = '\0';
2061
2062 /* On x86, if trampolines are available, then 4-byte jump instructions
2063 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2064 with a 4-byte offset are used instead. */
2065 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2066 return 4;
2067 else
2068 {
2069 /* GDB has no channel to explain to user why a shorter fast
2070 tracepoint is not possible, but at least make GDBserver
2071 mention that something has gone awry. */
2072 if (!warned_about_fast_tracepoints)
2073 {
2074 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2075 warned_about_fast_tracepoints = 1;
2076 }
2077 return 5;
2078 }
2079 }
2080 else
2081 {
2082 /* Indicate that the minimum length is currently unknown since the IPA
2083 has not loaded yet. */
2084 return 0;
2085 }
fa593d66
PA
2086}
2087
6a271cae
PA
/* Append the LEN bytes at START to the inferior at current_insn_ptr,
   advancing current_insn_ptr past them.  */
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
2100
6a271cae
PA
2101/* Our general strategy for emitting code is to avoid specifying raw
2102 bytes whenever possible, and instead copy a block of inline asm
2103 that is embedded in the function. This is a little messy, because
2104 we need to keep the compiler from discarding what looks like dead
2105 code, plus suppress various warnings. */
2106
9e4344e5
PA
/* Embed INSNS as inline asm between the labels start_NAME and
   end_NAME, jump over it so it never executes in this process, and
   copy the machine code between the two labels into the jump pad via
   add_insns.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assembles INSNS in .code32 mode so a 64-bit
   gdbserver can emit 32-bit inferior code.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a 32-bit build, 32-bit emission is just EMIT_ASM.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2138
2139#ifdef __x86_64__
2140
/* Emit the agent-expression function prologue: set up a frame,
   reserve 0x20 bytes, and spill the first two argument registers
   (%rdi, %rsi) to -8(%rbp) and -16(%rbp).  */
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
2151
2152
2153static void
2154amd64_emit_epilogue (void)
2155{
2156 EMIT_ASM (amd64_epilogue,
2157 "movq -16(%rbp),%rdi\n\t"
2158 "movq %rax,(%rdi)\n\t"
2159 "xor %rax,%rax\n\t"
2160 "leave\n\t"
2161 "ret");
2162}
2163
2164static void
2165amd64_emit_add (void)
2166{
2167 EMIT_ASM (amd64_add,
2168 "add (%rsp),%rax\n\t"
2169 "lea 0x8(%rsp),%rsp");
2170}
2171
2172static void
2173amd64_emit_sub (void)
2174{
2175 EMIT_ASM (amd64_sub,
2176 "sub %rax,(%rsp)\n\t"
2177 "pop %rax");
2178}
2179
2180static void
2181amd64_emit_mul (void)
2182{
2183 emit_error = 1;
2184}
2185
2186static void
2187amd64_emit_lsh (void)
2188{
2189 emit_error = 1;
2190}
2191
2192static void
2193amd64_emit_rsh_signed (void)
2194{
2195 emit_error = 1;
2196}
2197
2198static void
2199amd64_emit_rsh_unsigned (void)
2200{
2201 emit_error = 1;
2202}
2203
2204static void
2205amd64_emit_ext (int arg)
2206{
2207 switch (arg)
2208 {
2209 case 8:
2210 EMIT_ASM (amd64_ext_8,
2211 "cbtw\n\t"
2212 "cwtl\n\t"
2213 "cltq");
2214 break;
2215 case 16:
2216 EMIT_ASM (amd64_ext_16,
2217 "cwtl\n\t"
2218 "cltq");
2219 break;
2220 case 32:
2221 EMIT_ASM (amd64_ext_32,
2222 "cltq");
2223 break;
2224 default:
2225 emit_error = 1;
2226 }
2227}
2228
2229static void
2230amd64_emit_log_not (void)
2231{
2232 EMIT_ASM (amd64_log_not,
2233 "test %rax,%rax\n\t"
2234 "sete %cl\n\t"
2235 "movzbq %cl,%rax");
2236}
2237
2238static void
2239amd64_emit_bit_and (void)
2240{
2241 EMIT_ASM (amd64_and,
2242 "and (%rsp),%rax\n\t"
2243 "lea 0x8(%rsp),%rsp");
2244}
2245
2246static void
2247amd64_emit_bit_or (void)
2248{
2249 EMIT_ASM (amd64_or,
2250 "or (%rsp),%rax\n\t"
2251 "lea 0x8(%rsp),%rsp");
2252}
2253
2254static void
2255amd64_emit_bit_xor (void)
2256{
2257 EMIT_ASM (amd64_xor,
2258 "xor (%rsp),%rax\n\t"
2259 "lea 0x8(%rsp),%rsp");
2260}
2261
2262static void
2263amd64_emit_bit_not (void)
2264{
2265 EMIT_ASM (amd64_bit_not,
2266 "xorq $0xffffffffffffffff,%rax");
2267}
2268
2269static void
2270amd64_emit_equal (void)
2271{
2272 EMIT_ASM (amd64_equal,
2273 "cmp %rax,(%rsp)\n\t"
2274 "je .Lamd64_equal_true\n\t"
2275 "xor %rax,%rax\n\t"
2276 "jmp .Lamd64_equal_end\n\t"
2277 ".Lamd64_equal_true:\n\t"
2278 "mov $0x1,%rax\n\t"
2279 ".Lamd64_equal_end:\n\t"
2280 "lea 0x8(%rsp),%rsp");
2281}
2282
2283static void
2284amd64_emit_less_signed (void)
2285{
2286 EMIT_ASM (amd64_less_signed,
2287 "cmp %rax,(%rsp)\n\t"
2288 "jl .Lamd64_less_signed_true\n\t"
2289 "xor %rax,%rax\n\t"
2290 "jmp .Lamd64_less_signed_end\n\t"
2291 ".Lamd64_less_signed_true:\n\t"
2292 "mov $1,%rax\n\t"
2293 ".Lamd64_less_signed_end:\n\t"
2294 "lea 0x8(%rsp),%rsp");
2295}
2296
2297static void
2298amd64_emit_less_unsigned (void)
2299{
2300 EMIT_ASM (amd64_less_unsigned,
2301 "cmp %rax,(%rsp)\n\t"
2302 "jb .Lamd64_less_unsigned_true\n\t"
2303 "xor %rax,%rax\n\t"
2304 "jmp .Lamd64_less_unsigned_end\n\t"
2305 ".Lamd64_less_unsigned_true:\n\t"
2306 "mov $1,%rax\n\t"
2307 ".Lamd64_less_unsigned_end:\n\t"
2308 "lea 0x8(%rsp),%rsp");
2309}
2310
2311static void
2312amd64_emit_ref (int size)
2313{
2314 switch (size)
2315 {
2316 case 1:
2317 EMIT_ASM (amd64_ref1,
2318 "movb (%rax),%al");
2319 break;
2320 case 2:
2321 EMIT_ASM (amd64_ref2,
2322 "movw (%rax),%ax");
2323 break;
2324 case 4:
2325 EMIT_ASM (amd64_ref4,
2326 "movl (%rax),%eax");
2327 break;
2328 case 8:
2329 EMIT_ASM (amd64_ref8,
2330 "movq (%rax),%rax");
2331 break;
2332 }
2333}
2334
2335static void
2336amd64_emit_if_goto (int *offset_p, int *size_p)
2337{
2338 EMIT_ASM (amd64_if_goto,
2339 "mov %rax,%rcx\n\t"
2340 "pop %rax\n\t"
2341 "cmp $0,%rcx\n\t"
2342 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2343 if (offset_p)
2344 *offset_p = 10;
2345 if (size_p)
2346 *size_p = 4;
2347}
2348
2349static void
2350amd64_emit_goto (int *offset_p, int *size_p)
2351{
2352 EMIT_ASM (amd64_goto,
2353 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2354 if (offset_p)
2355 *offset_p = 1;
2356 if (size_p)
2357 *size_p = 4;
2358}
2359
2360static void
2361amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2362{
2363 int diff = (to - (from + size));
2364 unsigned char buf[sizeof (int)];
2365
2366 if (size != 4)
2367 {
2368 emit_error = 1;
2369 return;
2370 }
2371
2372 memcpy (buf, &diff, sizeof (int));
2373 write_inferior_memory (from, buf, sizeof (int));
2374}
2375
2376static void
4e29fb54 2377amd64_emit_const (LONGEST num)
6a271cae
PA
2378{
2379 unsigned char buf[16];
2380 int i;
2381 CORE_ADDR buildaddr = current_insn_ptr;
2382
2383 i = 0;
2384 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 2385 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
2386 i += 8;
2387 append_insns (&buildaddr, i, buf);
2388 current_insn_ptr = buildaddr;
2389}
2390
2391static void
2392amd64_emit_call (CORE_ADDR fn)
2393{
2394 unsigned char buf[16];
2395 int i;
2396 CORE_ADDR buildaddr;
4e29fb54 2397 LONGEST offset64;
6a271cae
PA
2398
2399 /* The destination function being in the shared library, may be
2400 >31-bits away off the compiled code pad. */
2401
2402 buildaddr = current_insn_ptr;
2403
2404 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2405
2406 i = 0;
2407
2408 if (offset64 > INT_MAX || offset64 < INT_MIN)
2409 {
2410 /* Offset is too large for a call. Use callq, but that requires
2411 a register, so avoid it if possible. Use r10, since it is
2412 call-clobbered, we don't have to push/pop it. */
2413 buf[i++] = 0x48; /* mov $fn,%r10 */
2414 buf[i++] = 0xba;
2415 memcpy (buf + i, &fn, 8);
2416 i += 8;
2417 buf[i++] = 0xff; /* callq *%r10 */
2418 buf[i++] = 0xd2;
2419 }
2420 else
2421 {
2422 int offset32 = offset64; /* we know we can't overflow here. */
2423 memcpy (buf + i, &offset32, 4);
2424 i += 4;
2425 }
2426
2427 append_insns (&buildaddr, i, buf);
2428 current_insn_ptr = buildaddr;
2429}
2430
2431static void
2432amd64_emit_reg (int reg)
2433{
2434 unsigned char buf[16];
2435 int i;
2436 CORE_ADDR buildaddr;
2437
2438 /* Assume raw_regs is still in %rdi. */
2439 buildaddr = current_insn_ptr;
2440 i = 0;
2441 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 2442 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2443 i += 4;
2444 append_insns (&buildaddr, i, buf);
2445 current_insn_ptr = buildaddr;
2446 amd64_emit_call (get_raw_reg_func_addr ());
2447}
2448
2449static void
2450amd64_emit_pop (void)
2451{
2452 EMIT_ASM (amd64_pop,
2453 "pop %rax");
2454}
2455
2456static void
2457amd64_emit_stack_flush (void)
2458{
2459 EMIT_ASM (amd64_stack_flush,
2460 "push %rax");
2461}
2462
2463static void
2464amd64_emit_zero_ext (int arg)
2465{
2466 switch (arg)
2467 {
2468 case 8:
2469 EMIT_ASM (amd64_zero_ext_8,
2470 "and $0xff,%rax");
2471 break;
2472 case 16:
2473 EMIT_ASM (amd64_zero_ext_16,
2474 "and $0xffff,%rax");
2475 break;
2476 case 32:
2477 EMIT_ASM (amd64_zero_ext_32,
2478 "mov $0xffffffff,%rcx\n\t"
2479 "and %rcx,%rax");
2480 break;
2481 default:
2482 emit_error = 1;
2483 }
2484}
2485
2486static void
2487amd64_emit_swap (void)
2488{
2489 EMIT_ASM (amd64_swap,
2490 "mov %rax,%rcx\n\t"
2491 "pop %rax\n\t"
2492 "push %rcx");
2493}
2494
2495static void
2496amd64_emit_stack_adjust (int n)
2497{
2498 unsigned char buf[16];
2499 int i;
2500 CORE_ADDR buildaddr = current_insn_ptr;
2501
2502 i = 0;
2503 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2504 buf[i++] = 0x8d;
2505 buf[i++] = 0x64;
2506 buf[i++] = 0x24;
2507 /* This only handles adjustments up to 16, but we don't expect any more. */
2508 buf[i++] = n * 8;
2509 append_insns (&buildaddr, i, buf);
2510 current_insn_ptr = buildaddr;
2511}
2512
2513/* FN's prototype is `LONGEST(*fn)(int)'. */
2514
2515static void
2516amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2517{
2518 unsigned char buf[16];
2519 int i;
2520 CORE_ADDR buildaddr;
2521
2522 buildaddr = current_insn_ptr;
2523 i = 0;
2524 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2525 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2526 i += 4;
2527 append_insns (&buildaddr, i, buf);
2528 current_insn_ptr = buildaddr;
2529 amd64_emit_call (fn);
2530}
2531
4e29fb54 2532/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2533
2534static void
2535amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2536{
2537 unsigned char buf[16];
2538 int i;
2539 CORE_ADDR buildaddr;
2540
2541 buildaddr = current_insn_ptr;
2542 i = 0;
2543 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2544 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2545 i += 4;
2546 append_insns (&buildaddr, i, buf);
2547 current_insn_ptr = buildaddr;
2548 EMIT_ASM (amd64_void_call_2_a,
2549 /* Save away a copy of the stack top. */
2550 "push %rax\n\t"
2551 /* Also pass top as the second argument. */
2552 "mov %rax,%rsi");
2553 amd64_emit_call (fn);
2554 EMIT_ASM (amd64_void_call_2_b,
2555 /* Restore the stack top, %rax may have been trashed. */
2556 "pop %rax");
2557}
2558
6b9801d4
SS
2559void
2560amd64_emit_eq_goto (int *offset_p, int *size_p)
2561{
2562 EMIT_ASM (amd64_eq,
2563 "cmp %rax,(%rsp)\n\t"
2564 "jne .Lamd64_eq_fallthru\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2566 "pop %rax\n\t"
2567 /* jmp, but don't trust the assembler to choose the right jump */
2568 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2569 ".Lamd64_eq_fallthru:\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2571 "pop %rax");
2572
2573 if (offset_p)
2574 *offset_p = 13;
2575 if (size_p)
2576 *size_p = 4;
2577}
2578
2579void
2580amd64_emit_ne_goto (int *offset_p, int *size_p)
2581{
2582 EMIT_ASM (amd64_ne,
2583 "cmp %rax,(%rsp)\n\t"
2584 "je .Lamd64_ne_fallthru\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax\n\t"
2587 /* jmp, but don't trust the assembler to choose the right jump */
2588 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2589 ".Lamd64_ne_fallthru:\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2591 "pop %rax");
2592
2593 if (offset_p)
2594 *offset_p = 13;
2595 if (size_p)
2596 *size_p = 4;
2597}
2598
2599void
2600amd64_emit_lt_goto (int *offset_p, int *size_p)
2601{
2602 EMIT_ASM (amd64_lt,
2603 "cmp %rax,(%rsp)\n\t"
2604 "jnl .Lamd64_lt_fallthru\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2606 "pop %rax\n\t"
2607 /* jmp, but don't trust the assembler to choose the right jump */
2608 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2609 ".Lamd64_lt_fallthru:\n\t"
2610 "lea 0x8(%rsp),%rsp\n\t"
2611 "pop %rax");
2612
2613 if (offset_p)
2614 *offset_p = 13;
2615 if (size_p)
2616 *size_p = 4;
2617}
2618
2619void
2620amd64_emit_le_goto (int *offset_p, int *size_p)
2621{
2622 EMIT_ASM (amd64_le,
2623 "cmp %rax,(%rsp)\n\t"
2624 "jnle .Lamd64_le_fallthru\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2626 "pop %rax\n\t"
2627 /* jmp, but don't trust the assembler to choose the right jump */
2628 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2629 ".Lamd64_le_fallthru:\n\t"
2630 "lea 0x8(%rsp),%rsp\n\t"
2631 "pop %rax");
2632
2633 if (offset_p)
2634 *offset_p = 13;
2635 if (size_p)
2636 *size_p = 4;
2637}
2638
2639void
2640amd64_emit_gt_goto (int *offset_p, int *size_p)
2641{
2642 EMIT_ASM (amd64_gt,
2643 "cmp %rax,(%rsp)\n\t"
2644 "jng .Lamd64_gt_fallthru\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2646 "pop %rax\n\t"
2647 /* jmp, but don't trust the assembler to choose the right jump */
2648 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2649 ".Lamd64_gt_fallthru:\n\t"
2650 "lea 0x8(%rsp),%rsp\n\t"
2651 "pop %rax");
2652
2653 if (offset_p)
2654 *offset_p = 13;
2655 if (size_p)
2656 *size_p = 4;
2657}
2658
2659void
2660amd64_emit_ge_goto (int *offset_p, int *size_p)
2661{
2662 EMIT_ASM (amd64_ge,
2663 "cmp %rax,(%rsp)\n\t"
2664 "jnge .Lamd64_ge_fallthru\n\t"
2665 ".Lamd64_ge_jump:\n\t"
2666 "lea 0x8(%rsp),%rsp\n\t"
2667 "pop %rax\n\t"
2668 /* jmp, but don't trust the assembler to choose the right jump */
2669 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2670 ".Lamd64_ge_fallthru:\n\t"
2671 "lea 0x8(%rsp),%rsp\n\t"
2672 "pop %rax");
2673
2674 if (offset_p)
2675 *offset_p = 13;
2676 if (size_p)
2677 *size_p = 4;
2678}
2679
6a271cae
PA
2680struct emit_ops amd64_emit_ops =
2681 {
2682 amd64_emit_prologue,
2683 amd64_emit_epilogue,
2684 amd64_emit_add,
2685 amd64_emit_sub,
2686 amd64_emit_mul,
2687 amd64_emit_lsh,
2688 amd64_emit_rsh_signed,
2689 amd64_emit_rsh_unsigned,
2690 amd64_emit_ext,
2691 amd64_emit_log_not,
2692 amd64_emit_bit_and,
2693 amd64_emit_bit_or,
2694 amd64_emit_bit_xor,
2695 amd64_emit_bit_not,
2696 amd64_emit_equal,
2697 amd64_emit_less_signed,
2698 amd64_emit_less_unsigned,
2699 amd64_emit_ref,
2700 amd64_emit_if_goto,
2701 amd64_emit_goto,
2702 amd64_write_goto_address,
2703 amd64_emit_const,
2704 amd64_emit_call,
2705 amd64_emit_reg,
2706 amd64_emit_pop,
2707 amd64_emit_stack_flush,
2708 amd64_emit_zero_ext,
2709 amd64_emit_swap,
2710 amd64_emit_stack_adjust,
2711 amd64_emit_int_call_1,
6b9801d4
SS
2712 amd64_emit_void_call_2,
2713 amd64_emit_eq_goto,
2714 amd64_emit_ne_goto,
2715 amd64_emit_lt_goto,
2716 amd64_emit_le_goto,
2717 amd64_emit_gt_goto,
2718 amd64_emit_ge_goto
6a271cae
PA
2719 };
2720
2721#endif /* __x86_64__ */
2722
2723static void
2724i386_emit_prologue (void)
2725{
2726 EMIT_ASM32 (i386_prologue,
2727 "push %ebp\n\t"
bf15cbda
SS
2728 "mov %esp,%ebp\n\t"
2729 "push %ebx");
6a271cae
PA
2730 /* At this point, the raw regs base address is at 8(%ebp), and the
2731 value pointer is at 12(%ebp). */
2732}
2733
2734static void
2735i386_emit_epilogue (void)
2736{
2737 EMIT_ASM32 (i386_epilogue,
2738 "mov 12(%ebp),%ecx\n\t"
2739 "mov %eax,(%ecx)\n\t"
2740 "mov %ebx,0x4(%ecx)\n\t"
2741 "xor %eax,%eax\n\t"
bf15cbda 2742 "pop %ebx\n\t"
6a271cae
PA
2743 "pop %ebp\n\t"
2744 "ret");
2745}
2746
2747static void
2748i386_emit_add (void)
2749{
2750 EMIT_ASM32 (i386_add,
2751 "add (%esp),%eax\n\t"
2752 "adc 0x4(%esp),%ebx\n\t"
2753 "lea 0x8(%esp),%esp");
2754}
2755
2756static void
2757i386_emit_sub (void)
2758{
2759 EMIT_ASM32 (i386_sub,
2760 "subl %eax,(%esp)\n\t"
2761 "sbbl %ebx,4(%esp)\n\t"
2762 "pop %eax\n\t"
2763 "pop %ebx\n\t");
2764}
2765
2766static void
2767i386_emit_mul (void)
2768{
2769 emit_error = 1;
2770}
2771
2772static void
2773i386_emit_lsh (void)
2774{
2775 emit_error = 1;
2776}
2777
2778static void
2779i386_emit_rsh_signed (void)
2780{
2781 emit_error = 1;
2782}
2783
2784static void
2785i386_emit_rsh_unsigned (void)
2786{
2787 emit_error = 1;
2788}
2789
2790static void
2791i386_emit_ext (int arg)
2792{
2793 switch (arg)
2794 {
2795 case 8:
2796 EMIT_ASM32 (i386_ext_8,
2797 "cbtw\n\t"
2798 "cwtl\n\t"
2799 "movl %eax,%ebx\n\t"
2800 "sarl $31,%ebx");
2801 break;
2802 case 16:
2803 EMIT_ASM32 (i386_ext_16,
2804 "cwtl\n\t"
2805 "movl %eax,%ebx\n\t"
2806 "sarl $31,%ebx");
2807 break;
2808 case 32:
2809 EMIT_ASM32 (i386_ext_32,
2810 "movl %eax,%ebx\n\t"
2811 "sarl $31,%ebx");
2812 break;
2813 default:
2814 emit_error = 1;
2815 }
2816}
2817
2818static void
2819i386_emit_log_not (void)
2820{
2821 EMIT_ASM32 (i386_log_not,
2822 "or %ebx,%eax\n\t"
2823 "test %eax,%eax\n\t"
2824 "sete %cl\n\t"
2825 "xor %ebx,%ebx\n\t"
2826 "movzbl %cl,%eax");
2827}
2828
2829static void
2830i386_emit_bit_and (void)
2831{
2832 EMIT_ASM32 (i386_and,
2833 "and (%esp),%eax\n\t"
2834 "and 0x4(%esp),%ebx\n\t"
2835 "lea 0x8(%esp),%esp");
2836}
2837
2838static void
2839i386_emit_bit_or (void)
2840{
2841 EMIT_ASM32 (i386_or,
2842 "or (%esp),%eax\n\t"
2843 "or 0x4(%esp),%ebx\n\t"
2844 "lea 0x8(%esp),%esp");
2845}
2846
2847static void
2848i386_emit_bit_xor (void)
2849{
2850 EMIT_ASM32 (i386_xor,
2851 "xor (%esp),%eax\n\t"
2852 "xor 0x4(%esp),%ebx\n\t"
2853 "lea 0x8(%esp),%esp");
2854}
2855
2856static void
2857i386_emit_bit_not (void)
2858{
2859 EMIT_ASM32 (i386_bit_not,
2860 "xor $0xffffffff,%eax\n\t"
2861 "xor $0xffffffff,%ebx\n\t");
2862}
2863
2864static void
2865i386_emit_equal (void)
2866{
2867 EMIT_ASM32 (i386_equal,
2868 "cmpl %ebx,4(%esp)\n\t"
2869 "jne .Li386_equal_false\n\t"
2870 "cmpl %eax,(%esp)\n\t"
2871 "je .Li386_equal_true\n\t"
2872 ".Li386_equal_false:\n\t"
2873 "xor %eax,%eax\n\t"
2874 "jmp .Li386_equal_end\n\t"
2875 ".Li386_equal_true:\n\t"
2876 "mov $1,%eax\n\t"
2877 ".Li386_equal_end:\n\t"
2878 "xor %ebx,%ebx\n\t"
2879 "lea 0x8(%esp),%esp");
2880}
2881
2882static void
2883i386_emit_less_signed (void)
2884{
2885 EMIT_ASM32 (i386_less_signed,
2886 "cmpl %ebx,4(%esp)\n\t"
2887 "jl .Li386_less_signed_true\n\t"
2888 "jne .Li386_less_signed_false\n\t"
2889 "cmpl %eax,(%esp)\n\t"
2890 "jl .Li386_less_signed_true\n\t"
2891 ".Li386_less_signed_false:\n\t"
2892 "xor %eax,%eax\n\t"
2893 "jmp .Li386_less_signed_end\n\t"
2894 ".Li386_less_signed_true:\n\t"
2895 "mov $1,%eax\n\t"
2896 ".Li386_less_signed_end:\n\t"
2897 "xor %ebx,%ebx\n\t"
2898 "lea 0x8(%esp),%esp");
2899}
2900
2901static void
2902i386_emit_less_unsigned (void)
2903{
2904 EMIT_ASM32 (i386_less_unsigned,
2905 "cmpl %ebx,4(%esp)\n\t"
2906 "jb .Li386_less_unsigned_true\n\t"
2907 "jne .Li386_less_unsigned_false\n\t"
2908 "cmpl %eax,(%esp)\n\t"
2909 "jb .Li386_less_unsigned_true\n\t"
2910 ".Li386_less_unsigned_false:\n\t"
2911 "xor %eax,%eax\n\t"
2912 "jmp .Li386_less_unsigned_end\n\t"
2913 ".Li386_less_unsigned_true:\n\t"
2914 "mov $1,%eax\n\t"
2915 ".Li386_less_unsigned_end:\n\t"
2916 "xor %ebx,%ebx\n\t"
2917 "lea 0x8(%esp),%esp");
2918}
2919
2920static void
2921i386_emit_ref (int size)
2922{
2923 switch (size)
2924 {
2925 case 1:
2926 EMIT_ASM32 (i386_ref1,
2927 "movb (%eax),%al");
2928 break;
2929 case 2:
2930 EMIT_ASM32 (i386_ref2,
2931 "movw (%eax),%ax");
2932 break;
2933 case 4:
2934 EMIT_ASM32 (i386_ref4,
2935 "movl (%eax),%eax");
2936 break;
2937 case 8:
2938 EMIT_ASM32 (i386_ref8,
2939 "movl 4(%eax),%ebx\n\t"
2940 "movl (%eax),%eax");
2941 break;
2942 }
2943}
2944
2945static void
2946i386_emit_if_goto (int *offset_p, int *size_p)
2947{
2948 EMIT_ASM32 (i386_if_goto,
2949 "mov %eax,%ecx\n\t"
2950 "or %ebx,%ecx\n\t"
2951 "pop %eax\n\t"
2952 "pop %ebx\n\t"
2953 "cmpl $0,%ecx\n\t"
2954 /* Don't trust the assembler to choose the right jump */
2955 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2956
2957 if (offset_p)
2958 *offset_p = 11; /* be sure that this matches the sequence above */
2959 if (size_p)
2960 *size_p = 4;
2961}
2962
2963static void
2964i386_emit_goto (int *offset_p, int *size_p)
2965{
2966 EMIT_ASM32 (i386_goto,
2967 /* Don't trust the assembler to choose the right jump */
2968 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2969 if (offset_p)
2970 *offset_p = 1;
2971 if (size_p)
2972 *size_p = 4;
2973}
2974
2975static void
2976i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2977{
2978 int diff = (to - (from + size));
2979 unsigned char buf[sizeof (int)];
2980
2981 /* We're only doing 4-byte sizes at the moment. */
2982 if (size != 4)
2983 {
2984 emit_error = 1;
2985 return;
2986 }
2987
2988 memcpy (buf, &diff, sizeof (int));
2989 write_inferior_memory (from, buf, sizeof (int));
2990}
2991
2992static void
4e29fb54 2993i386_emit_const (LONGEST num)
6a271cae
PA
2994{
2995 unsigned char buf[16];
b00ad6ff 2996 int i, hi, lo;
6a271cae
PA
2997 CORE_ADDR buildaddr = current_insn_ptr;
2998
2999 i = 0;
3000 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
3001 lo = num & 0xffffffff;
3002 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
3003 i += 4;
3004 hi = ((num >> 32) & 0xffffffff);
3005 if (hi)
3006 {
3007 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 3008 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
3009 i += 4;
3010 }
3011 else
3012 {
3013 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3014 }
3015 append_insns (&buildaddr, i, buf);
3016 current_insn_ptr = buildaddr;
3017}
3018
3019static void
3020i386_emit_call (CORE_ADDR fn)
3021{
3022 unsigned char buf[16];
3023 int i, offset;
3024 CORE_ADDR buildaddr;
3025
3026 buildaddr = current_insn_ptr;
3027 i = 0;
3028 buf[i++] = 0xe8; /* call <reladdr> */
3029 offset = ((int) fn) - (buildaddr + 5);
3030 memcpy (buf + 1, &offset, 4);
3031 append_insns (&buildaddr, 5, buf);
3032 current_insn_ptr = buildaddr;
3033}
3034
3035static void
3036i386_emit_reg (int reg)
3037{
3038 unsigned char buf[16];
3039 int i;
3040 CORE_ADDR buildaddr;
3041
3042 EMIT_ASM32 (i386_reg_a,
3043 "sub $0x8,%esp");
3044 buildaddr = current_insn_ptr;
3045 i = 0;
3046 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff 3047 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
3048 i += 4;
3049 append_insns (&buildaddr, i, buf);
3050 current_insn_ptr = buildaddr;
3051 EMIT_ASM32 (i386_reg_b,
3052 "mov %eax,4(%esp)\n\t"
3053 "mov 8(%ebp),%eax\n\t"
3054 "mov %eax,(%esp)");
3055 i386_emit_call (get_raw_reg_func_addr ());
3056 EMIT_ASM32 (i386_reg_c,
3057 "xor %ebx,%ebx\n\t"
3058 "lea 0x8(%esp),%esp");
3059}
3060
3061static void
3062i386_emit_pop (void)
3063{
3064 EMIT_ASM32 (i386_pop,
3065 "pop %eax\n\t"
3066 "pop %ebx");
3067}
3068
3069static void
3070i386_emit_stack_flush (void)
3071{
3072 EMIT_ASM32 (i386_stack_flush,
3073 "push %ebx\n\t"
3074 "push %eax");
3075}
3076
3077static void
3078i386_emit_zero_ext (int arg)
3079{
3080 switch (arg)
3081 {
3082 case 8:
3083 EMIT_ASM32 (i386_zero_ext_8,
3084 "and $0xff,%eax\n\t"
3085 "xor %ebx,%ebx");
3086 break;
3087 case 16:
3088 EMIT_ASM32 (i386_zero_ext_16,
3089 "and $0xffff,%eax\n\t"
3090 "xor %ebx,%ebx");
3091 break;
3092 case 32:
3093 EMIT_ASM32 (i386_zero_ext_32,
3094 "xor %ebx,%ebx");
3095 break;
3096 default:
3097 emit_error = 1;
3098 }
3099}
3100
3101static void
3102i386_emit_swap (void)
3103{
3104 EMIT_ASM32 (i386_swap,
3105 "mov %eax,%ecx\n\t"
3106 "mov %ebx,%edx\n\t"
3107 "pop %eax\n\t"
3108 "pop %ebx\n\t"
3109 "push %edx\n\t"
3110 "push %ecx");
3111}
3112
3113static void
3114i386_emit_stack_adjust (int n)
3115{
3116 unsigned char buf[16];
3117 int i;
3118 CORE_ADDR buildaddr = current_insn_ptr;
3119
3120 i = 0;
3121 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3122 buf[i++] = 0x64;
3123 buf[i++] = 0x24;
3124 buf[i++] = n * 8;
3125 append_insns (&buildaddr, i, buf);
3126 current_insn_ptr = buildaddr;
3127}
3128
3129/* FN's prototype is `LONGEST(*fn)(int)'. */
3130
3131static void
3132i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3133{
3134 unsigned char buf[16];
3135 int i;
3136 CORE_ADDR buildaddr;
3137
3138 EMIT_ASM32 (i386_int_call_1_a,
3139 /* Reserve a bit of stack space. */
3140 "sub $0x8,%esp");
3141 /* Put the one argument on the stack. */
3142 buildaddr = current_insn_ptr;
3143 i = 0;
3144 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3145 buf[i++] = 0x04;
3146 buf[i++] = 0x24;
b00ad6ff 3147 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3148 i += 4;
3149 append_insns (&buildaddr, i, buf);
3150 current_insn_ptr = buildaddr;
3151 i386_emit_call (fn);
3152 EMIT_ASM32 (i386_int_call_1_c,
3153 "mov %edx,%ebx\n\t"
3154 "lea 0x8(%esp),%esp");
3155}
3156
4e29fb54 3157/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
3158
3159static void
3160i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3161{
3162 unsigned char buf[16];
3163 int i;
3164 CORE_ADDR buildaddr;
3165
3166 EMIT_ASM32 (i386_void_call_2_a,
3167 /* Preserve %eax only; we don't have to worry about %ebx. */
3168 "push %eax\n\t"
3169 /* Reserve a bit of stack space for arguments. */
3170 "sub $0x10,%esp\n\t"
3171 /* Copy "top" to the second argument position. (Note that
3172 we can't assume function won't scribble on its
3173 arguments, so don't try to restore from this.) */
3174 "mov %eax,4(%esp)\n\t"
3175 "mov %ebx,8(%esp)");
3176 /* Put the first argument on the stack. */
3177 buildaddr = current_insn_ptr;
3178 i = 0;
3179 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3180 buf[i++] = 0x04;
3181 buf[i++] = 0x24;
b00ad6ff 3182 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3183 i += 4;
3184 append_insns (&buildaddr, i, buf);
3185 current_insn_ptr = buildaddr;
3186 i386_emit_call (fn);
3187 EMIT_ASM32 (i386_void_call_2_b,
3188 "lea 0x10(%esp),%esp\n\t"
3189 /* Restore original stack top. */
3190 "pop %eax");
3191}
3192
6b9801d4
SS
3193
3194void
3195i386_emit_eq_goto (int *offset_p, int *size_p)
3196{
3197 EMIT_ASM32 (eq,
3198 /* Check low half first, more likely to be decider */
3199 "cmpl %eax,(%esp)\n\t"
3200 "jne .Leq_fallthru\n\t"
3201 "cmpl %ebx,4(%esp)\n\t"
3202 "jne .Leq_fallthru\n\t"
3203 "lea 0x8(%esp),%esp\n\t"
3204 "pop %eax\n\t"
3205 "pop %ebx\n\t"
3206 /* jmp, but don't trust the assembler to choose the right jump */
3207 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3208 ".Leq_fallthru:\n\t"
3209 "lea 0x8(%esp),%esp\n\t"
3210 "pop %eax\n\t"
3211 "pop %ebx");
3212
3213 if (offset_p)
3214 *offset_p = 18;
3215 if (size_p)
3216 *size_p = 4;
3217}
3218
3219void
3220i386_emit_ne_goto (int *offset_p, int *size_p)
3221{
3222 EMIT_ASM32 (ne,
3223 /* Check low half first, more likely to be decider */
3224 "cmpl %eax,(%esp)\n\t"
3225 "jne .Lne_jump\n\t"
3226 "cmpl %ebx,4(%esp)\n\t"
3227 "je .Lne_fallthru\n\t"
3228 ".Lne_jump:\n\t"
3229 "lea 0x8(%esp),%esp\n\t"
3230 "pop %eax\n\t"
3231 "pop %ebx\n\t"
3232 /* jmp, but don't trust the assembler to choose the right jump */
3233 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3234 ".Lne_fallthru:\n\t"
3235 "lea 0x8(%esp),%esp\n\t"
3236 "pop %eax\n\t"
3237 "pop %ebx");
3238
3239 if (offset_p)
3240 *offset_p = 18;
3241 if (size_p)
3242 *size_p = 4;
3243}
3244
3245void
3246i386_emit_lt_goto (int *offset_p, int *size_p)
3247{
3248 EMIT_ASM32 (lt,
3249 "cmpl %ebx,4(%esp)\n\t"
3250 "jl .Llt_jump\n\t"
3251 "jne .Llt_fallthru\n\t"
3252 "cmpl %eax,(%esp)\n\t"
3253 "jnl .Llt_fallthru\n\t"
3254 ".Llt_jump:\n\t"
3255 "lea 0x8(%esp),%esp\n\t"
3256 "pop %eax\n\t"
3257 "pop %ebx\n\t"
3258 /* jmp, but don't trust the assembler to choose the right jump */
3259 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3260 ".Llt_fallthru:\n\t"
3261 "lea 0x8(%esp),%esp\n\t"
3262 "pop %eax\n\t"
3263 "pop %ebx");
3264
3265 if (offset_p)
3266 *offset_p = 20;
3267 if (size_p)
3268 *size_p = 4;
3269}
3270
3271void
3272i386_emit_le_goto (int *offset_p, int *size_p)
3273{
3274 EMIT_ASM32 (le,
3275 "cmpl %ebx,4(%esp)\n\t"
3276 "jle .Lle_jump\n\t"
3277 "jne .Lle_fallthru\n\t"
3278 "cmpl %eax,(%esp)\n\t"
3279 "jnle .Lle_fallthru\n\t"
3280 ".Lle_jump:\n\t"
3281 "lea 0x8(%esp),%esp\n\t"
3282 "pop %eax\n\t"
3283 "pop %ebx\n\t"
3284 /* jmp, but don't trust the assembler to choose the right jump */
3285 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3286 ".Lle_fallthru:\n\t"
3287 "lea 0x8(%esp),%esp\n\t"
3288 "pop %eax\n\t"
3289 "pop %ebx");
3290
3291 if (offset_p)
3292 *offset_p = 20;
3293 if (size_p)
3294 *size_p = 4;
3295}
3296
3297void
3298i386_emit_gt_goto (int *offset_p, int *size_p)
3299{
3300 EMIT_ASM32 (gt,
3301 "cmpl %ebx,4(%esp)\n\t"
3302 "jg .Lgt_jump\n\t"
3303 "jne .Lgt_fallthru\n\t"
3304 "cmpl %eax,(%esp)\n\t"
3305 "jng .Lgt_fallthru\n\t"
3306 ".Lgt_jump:\n\t"
3307 "lea 0x8(%esp),%esp\n\t"
3308 "pop %eax\n\t"
3309 "pop %ebx\n\t"
3310 /* jmp, but don't trust the assembler to choose the right jump */
3311 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3312 ".Lgt_fallthru:\n\t"
3313 "lea 0x8(%esp),%esp\n\t"
3314 "pop %eax\n\t"
3315 "pop %ebx");
3316
3317 if (offset_p)
3318 *offset_p = 20;
3319 if (size_p)
3320 *size_p = 4;
3321}
3322
3323void
3324i386_emit_ge_goto (int *offset_p, int *size_p)
3325{
3326 EMIT_ASM32 (ge,
3327 "cmpl %ebx,4(%esp)\n\t"
3328 "jge .Lge_jump\n\t"
3329 "jne .Lge_fallthru\n\t"
3330 "cmpl %eax,(%esp)\n\t"
3331 "jnge .Lge_fallthru\n\t"
3332 ".Lge_jump:\n\t"
3333 "lea 0x8(%esp),%esp\n\t"
3334 "pop %eax\n\t"
3335 "pop %ebx\n\t"
3336 /* jmp, but don't trust the assembler to choose the right jump */
3337 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3338 ".Lge_fallthru:\n\t"
3339 "lea 0x8(%esp),%esp\n\t"
3340 "pop %eax\n\t"
3341 "pop %ebx");
3342
3343 if (offset_p)
3344 *offset_p = 20;
3345 if (size_p)
3346 *size_p = 4;
3347}
3348
6a271cae
PA
3349struct emit_ops i386_emit_ops =
3350 {
3351 i386_emit_prologue,
3352 i386_emit_epilogue,
3353 i386_emit_add,
3354 i386_emit_sub,
3355 i386_emit_mul,
3356 i386_emit_lsh,
3357 i386_emit_rsh_signed,
3358 i386_emit_rsh_unsigned,
3359 i386_emit_ext,
3360 i386_emit_log_not,
3361 i386_emit_bit_and,
3362 i386_emit_bit_or,
3363 i386_emit_bit_xor,
3364 i386_emit_bit_not,
3365 i386_emit_equal,
3366 i386_emit_less_signed,
3367 i386_emit_less_unsigned,
3368 i386_emit_ref,
3369 i386_emit_if_goto,
3370 i386_emit_goto,
3371 i386_write_goto_address,
3372 i386_emit_const,
3373 i386_emit_call,
3374 i386_emit_reg,
3375 i386_emit_pop,
3376 i386_emit_stack_flush,
3377 i386_emit_zero_ext,
3378 i386_emit_swap,
3379 i386_emit_stack_adjust,
3380 i386_emit_int_call_1,
6b9801d4
SS
3381 i386_emit_void_call_2,
3382 i386_emit_eq_goto,
3383 i386_emit_ne_goto,
3384 i386_emit_lt_goto,
3385 i386_emit_le_goto,
3386 i386_emit_gt_goto,
3387 i386_emit_ge_goto
6a271cae
PA
3388 };
3389
3390
3391static struct emit_ops *
3392x86_emit_ops (void)
3393{
3394#ifdef __x86_64__
3aee8918 3395 if (is_64bit_tdesc ())
6a271cae
PA
3396 return &amd64_emit_ops;
3397 else
3398#endif
3399 return &i386_emit_ops;
3400}
/* The x86 target always supports range stepping (vCont;r).  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3407
d0722149
DE
3408/* This is initialized assuming an amd64 target.
3409 x86_arch_setup will correct it for i386 or amd64 targets. */
3410
3411struct linux_target_ops the_low_target =
3412{
3413 x86_arch_setup,
3aee8918
PA
3414 x86_linux_regs_info,
3415 x86_cannot_fetch_register,
3416 x86_cannot_store_register,
c14dfd32 3417 NULL, /* fetch_register */
d0722149
DE
3418 x86_get_pc,
3419 x86_set_pc,
3420 x86_breakpoint,
3421 x86_breakpoint_len,
3422 NULL,
3423 1,
3424 x86_breakpoint_at,
802e8e6d 3425 x86_supports_z_point_type,
aa5ca48f
DE
3426 x86_insert_point,
3427 x86_remove_point,
3428 x86_stopped_by_watchpoint,
3429 x86_stopped_data_address,
d0722149
DE
3430 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3431 native i386 case (no registers smaller than an xfer unit), and are not
3432 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3433 NULL,
3434 NULL,
3435 /* need to fix up i386 siginfo if host is amd64 */
3436 x86_siginfo_fixup,
aa5ca48f
DE
3437 x86_linux_new_process,
3438 x86_linux_new_thread,
1570b33e 3439 x86_linux_prepare_to_resume,
219f2f23 3440 x86_linux_process_qsupported,
fa593d66
PA
3441 x86_supports_tracepoints,
3442 x86_get_thread_area,
6a271cae 3443 x86_install_fast_tracepoint_jump_pad,
405f8e94
SS
3444 x86_emit_ops,
3445 x86_get_min_fast_tracepoint_insn_len,
c2d6af84 3446 x86_supports_range_stepping,
d0722149 3447};
3aee8918
PA
3448
3449void
3450initialize_low_arch (void)
3451{
3452 /* Initialize the Linux target descriptions. */
3453#ifdef __x86_64__
3454 init_registers_amd64_linux ();
3455 init_registers_amd64_avx_linux ();
01f9f808 3456 init_registers_amd64_avx512_linux ();
a196ebeb
WT
3457 init_registers_amd64_mpx_linux ();
3458
3aee8918 3459 init_registers_x32_linux ();
7e5aaa09 3460 init_registers_x32_avx_linux ();
01f9f808 3461 init_registers_x32_avx512_linux ();
3aee8918
PA
3462
3463 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3464 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3465 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3466#endif
3467 init_registers_i386_linux ();
3468 init_registers_i386_mmx_linux ();
3469 init_registers_i386_avx_linux ();
01f9f808 3470 init_registers_i386_avx512_linux ();
a196ebeb 3471 init_registers_i386_mpx_linux ();
3aee8918
PA
3472
3473 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3474 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3475 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3476
3477 initialize_regsets_info (&x86_regsets_info);
3478}
This page took 0.70936 seconds and 4 git commands to generate.