* eval.c (evaluate_subexp_standard): Add some comments.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
ecd75fc8 3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e 28#include "i386-xstate.h"
d0722149
DE
29
30#include "gdb_proc_service.h"
b5737fa9
PA
31/* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
33#ifndef ELFMAG0
34#include "elf/common.h"
35#endif
36
58b4daa5 37#include "agent.h"
3aee8918 38#include "tdesc.h"
c144c7a0 39#include "tracepoint.h"
f699aaba 40#include "ax.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
01f9f808
MS
51/* Defined in auto-generated file amd64-avx512-linux.c. */
52void init_registers_amd64_avx512_linux (void);
53extern const struct target_desc *tdesc_amd64_avx512_linux;
54
a196ebeb
WT
55/* Defined in auto-generated file amd64-mpx-linux.c. */
56void init_registers_amd64_mpx_linux (void);
57extern const struct target_desc *tdesc_amd64_mpx_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-linux.c. */
60void init_registers_x32_linux (void);
3aee8918
PA
61extern const struct target_desc *tdesc_x32_linux;
62
4d47af5c
L
63/* Defined in auto-generated file x32-avx-linux.c. */
64void init_registers_x32_avx_linux (void);
3aee8918 65extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 66
01f9f808
MS
67/* Defined in auto-generated file x32-avx512-linux.c. */
68void init_registers_x32_avx512_linux (void);
69extern const struct target_desc *tdesc_x32_avx512_linux;
70
3aee8918
PA
71#endif
72
73/* Defined in auto-generated file i386-linux.c. */
74void init_registers_i386_linux (void);
75extern const struct target_desc *tdesc_i386_linux;
76
77/* Defined in auto-generated file i386-mmx-linux.c. */
78void init_registers_i386_mmx_linux (void);
79extern const struct target_desc *tdesc_i386_mmx_linux;
80
81/* Defined in auto-generated file i386-avx-linux.c. */
82void init_registers_i386_avx_linux (void);
83extern const struct target_desc *tdesc_i386_avx_linux;
84
01f9f808
MS
85/* Defined in auto-generated file i386-avx512-linux.c. */
86void init_registers_i386_avx512_linux (void);
87extern const struct target_desc *tdesc_i386_avx512_linux;
88
a196ebeb
WT
89/* Defined in auto-generated file i386-mpx-linux.c. */
90void init_registers_i386_mpx_linux (void);
91extern const struct target_desc *tdesc_i386_mpx_linux;
92
3aee8918
PA
93#ifdef __x86_64__
94static struct target_desc *tdesc_amd64_linux_no_xml;
95#endif
96static struct target_desc *tdesc_i386_linux_no_xml;
97
1570b33e 98
fa593d66 99static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 100static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 101
1570b33e
L
102/* Backward compatibility for gdb without XML support. */
103
104static const char *xmltarget_i386_linux_no_xml = "@<target>\
105<architecture>i386</architecture>\
106<osabi>GNU/Linux</osabi>\
107</target>";
f6d1620c
L
108
109#ifdef __x86_64__
1570b33e
L
110static const char *xmltarget_amd64_linux_no_xml = "@<target>\
111<architecture>i386:x86-64</architecture>\
112<osabi>GNU/Linux</osabi>\
113</target>";
f6d1620c 114#endif
d0722149
DE
115
116#include <sys/reg.h>
117#include <sys/procfs.h>
118#include <sys/ptrace.h>
1570b33e
L
119#include <sys/uio.h>
120
121#ifndef PTRACE_GETREGSET
122#define PTRACE_GETREGSET 0x4204
123#endif
124
125#ifndef PTRACE_SETREGSET
126#define PTRACE_SETREGSET 0x4205
127#endif
128
d0722149
DE
129
130#ifndef PTRACE_GET_THREAD_AREA
131#define PTRACE_GET_THREAD_AREA 25
132#endif
133
134/* This definition comes from prctl.h, but some kernels may not have it. */
135#ifndef PTRACE_ARCH_PRCTL
136#define PTRACE_ARCH_PRCTL 30
137#endif
138
139/* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
141#ifndef ARCH_GET_FS
142#define ARCH_SET_GS 0x1001
143#define ARCH_SET_FS 0x1002
144#define ARCH_GET_FS 0x1003
145#define ARCH_GET_GS 0x1004
146#endif
147
aa5ca48f
DE
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Mirror of the debug register state (addresses, control bits and
     reference counts) maintained by the shared i386-low code; the
     real DR0..DR7 are written lazily from this in
     x86_linux_prepare_to_resume.  */
  struct i386_debug_reg_state debug_reg_state;
};
154
/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread,
     i.e. the thread's hardware debug registers must be rewritten
     before it is next resumed.  */
  int debug_registers_changed;
};
162
d0722149
DE
163#ifdef __x86_64__
164
165/* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168static /*const*/ int i386_regmap[] =
169{
170 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
171 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
172 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
173 DS * 8, ES * 8, FS * 8, GS * 8
174};
175
176#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
177
178/* So code below doesn't have to care, i386 or amd64. */
179#define ORIG_EAX ORIG_RAX
180
181static const int x86_64_regmap[] =
182{
183 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
184 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
185 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
186 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
187 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
188 DS * 8, ES * 8, FS * 8, GS * 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
192 -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
194 ORIG_RAX * 8,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
d0722149
DE
206};
207
208#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
209
210#else /* ! __x86_64__ */
211
212/* Mapping between the general-purpose registers in `struct user'
213 format and GDB's register array layout. */
214static /*const*/ int i386_regmap[] =
215{
216 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
217 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
218 EIP * 4, EFL * 4, CS * 4, SS * 4,
219 DS * 4, ES * 4, FS * 4, GS * 4
220};
221
222#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
223
224#endif
3aee8918
PA
225
226#ifdef __x86_64__
227
228/* Returns true if the current inferior belongs to a x86-64 process,
229 per the tdesc. */
230
231static int
232is_64bit_tdesc (void)
233{
234 struct regcache *regcache = get_thread_regcache (current_inferior, 0);
235
236 return register_size (regcache->tdesc, 0) == 8;
237}
238
239#endif
240
d0722149
DE
241\f
/* Called by libthread_db.  Fetch the thread-area (TLS) base address
   of thread LWPID into *BASE.  For a 64-bit inferior IDX names the
   segment register (FS or GS) and the address is read with
   PTRACE_ARCH_PRCTL; for a 32-bit inferior IDX is a GDT descriptor
   index read with PTRACE_GET_THREAD_AREA.  Returns PS_OK on success,
   PS_BADADDR for an unknown IDX, PS_ERR on ptrace failure.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* GDT descriptor: desc[1] holds the base address field.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
282
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  Stores the
   address in *ADDR and returns 0 on success, -1 on failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: the thread area base lives in FS.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    /* 32-bit inferior: derive the GDT index from the GS selector and
       fetch the corresponding descriptor.  */
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    /* The selector's low 3 bits are RPL/TI; the rest is the index.  */
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the descriptor's base-address field.  */
    *addr = desc[1];
    return 0;
  }
}
329
330
d0722149
DE
331\f
332static int
3aee8918 333x86_cannot_store_register (int regno)
d0722149 334{
3aee8918
PA
335#ifdef __x86_64__
336 if (is_64bit_tdesc ())
337 return 0;
338#endif
339
d0722149
DE
340 return regno >= I386_NUM_REGS;
341}
342
343static int
3aee8918 344x86_cannot_fetch_register (int regno)
d0722149 345{
3aee8918
PA
346#ifdef __x86_64__
347 if (is_64bit_tdesc ())
348 return 0;
349#endif
350
d0722149
DE
351 return regno >= I386_NUM_REGS;
352}
353
/* Fill BUF (a general-register set in `struct user' layout) from
   REGCACHE.  The 64-bit or 32-bit layout is selected from the
   register size recorded in the tdesc, so a 64-bit gdbserver can
   handle a 32-bit inferior.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax has no slot in i386_regmap; locate it by name.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
375
/* Store BUF (a general-register set in `struct user' layout) into
   REGCACHE.  Inverse of x86_fill_gregset; layout selection is the
   same.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax has no slot in i386_regmap; locate it by name.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
397
/* Fill BUF with the inferior's floating-point registers from
   REGCACHE, in fxsave format on amd64 and fsave format on i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
407
/* Store the floating-point registers in BUF (fxsave format on amd64,
   fsave format on i386) into REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
417
418#ifndef __x86_64__
419
/* Fill BUF with the inferior's extended FP/SSE registers (fxsave
   format) from REGCACHE.  i386-only: amd64 uses x86_fill_fpregset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
425
/* Store the extended FP/SSE registers in BUF (fxsave format) into
   REGCACHE.  i386-only counterpart of x86_store_fpregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
431
432#endif
433
1570b33e
L
/* Fill BUF with the inferior's extended state (xsave format, used for
   AVX and later features) from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
439
/* Store the extended state in BUF (xsave format) into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
445
d0722149
DE
446/* ??? The non-biarch i386 case stores all the i387 regs twice.
447 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
448 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
449 doesn't work. IWBN to avoid the duplication in the case where it
450 does work. Maybe the arch_setup routine could check whether it works
3aee8918 451 and update the supported regsets accordingly. */
d0722149 452
3aee8918 453static struct regset_info x86_regsets[] =
d0722149
DE
454{
455#ifdef HAVE_PTRACE_GETREGS
1570b33e 456 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
457 GENERAL_REGS,
458 x86_fill_gregset, x86_store_gregset },
1570b33e
L
459 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
460 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
461# ifndef __x86_64__
462# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 463 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
464 EXTENDED_REGS,
465 x86_fill_fpxregset, x86_store_fpxregset },
466# endif
467# endif
1570b33e 468 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
469 FP_REGS,
470 x86_fill_fpregset, x86_store_fpregset },
471#endif /* HAVE_PTRACE_GETREGS */
1570b33e 472 { 0, 0, 0, -1, -1, NULL, NULL }
d0722149
DE
473};
474
475static CORE_ADDR
442ea881 476x86_get_pc (struct regcache *regcache)
d0722149 477{
3aee8918 478 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
479
480 if (use_64bit)
481 {
482 unsigned long pc;
442ea881 483 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
484 return (CORE_ADDR) pc;
485 }
486 else
487 {
488 unsigned int pc;
442ea881 489 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
490 return (CORE_ADDR) pc;
491 }
492}
493
494static void
442ea881 495x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 496{
3aee8918 497 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
498
499 if (use_64bit)
500 {
501 unsigned long newpc = pc;
442ea881 502 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
503 }
504 else
505 {
506 unsigned int newpc = pc;
442ea881 507 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
508 }
509}
510\f
511static const unsigned char x86_breakpoint[] = { 0xCC };
512#define x86_breakpoint_len 1
513
514static int
515x86_breakpoint_at (CORE_ADDR pc)
516{
517 unsigned char c;
518
fc7238bb 519 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
520 if (c == 0xCC)
521 return 1;
522
523 return 0;
524}
525\f
aa5ca48f
DE
526/* Support for debug registers. */
527
528static unsigned long
529x86_linux_dr_get (ptid_t ptid, int regnum)
530{
531 int tid;
532 unsigned long value;
533
534 tid = ptid_get_lwp (ptid);
535
536 errno = 0;
537 value = ptrace (PTRACE_PEEKUSER, tid,
538 offsetof (struct user, u_debugreg[regnum]), 0);
539 if (errno != 0)
540 error ("Couldn't read debug register");
541
542 return value;
543}
544
545static void
546x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
547{
548 int tid;
549
550 tid = ptid_get_lwp (ptid);
551
552 errno = 0;
553 ptrace (PTRACE_POKEUSER, tid,
554 offsetof (struct user, u_debugreg[regnum]), value);
555 if (errno != 0)
556 error ("Couldn't write debug register");
557}
558
964e4306
PA
/* find_inferior callback: mark the thread ENTRY as needing a debug
   register refresh if it belongs to the process whose pid is pointed
   to by PID_P.  Always returns 0 so iteration visits every thread.  */

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
582
aa5ca48f
DE
/* Update the inferior's debug register REGNUM from STATE.  The write
   is deferred: every thread of the current process is marked so the
   register is rewritten just before that thread is resumed.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}
aa5ca48f 596
964e4306 597/* Return the inferior's debug register REGNUM. */
aa5ca48f 598
964e4306
PA
599CORE_ADDR
600i386_dr_low_get_addr (int regnum)
601{
d86d4aaf 602 ptid_t ptid = ptid_of (current_inferior);
964e4306
PA
603
604 /* DR6 and DR7 are retrieved with some other way. */
0a5b1e09 605 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
606
607 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
608}
609
/* Update the inferior's DR7 debug control register from STATE.  Like
   i386_dr_low_set_addr, the write is deferred until each thread is
   next resumed.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}
aa5ca48f 620
964e4306
PA
/* Return the inferior's DR7 debug control register, read from the
   current thread.  */

unsigned
i386_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}
630
/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
641\f
90d74c30 642/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
643
/* Insert a breakpoint or watchpoint of TYPE (Z-packet type digit) at
   ADDR covering LEN bytes.  Returns 0 on success, 1 if TYPE is
   unsupported, -1 on failure.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	/* Software breakpoints are written to target memory, which
	   may need preparation (e.g. pausing other threads).  */
	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      {
	enum target_hw_bp_type hw_type = Z_packet_to_hw_type (type);
	struct i386_debug_reg_state *state
	  = &proc->private->arch_private->debug_reg_state;

	return i386_low_insert_watchpoint (state, hw_type, addr, len);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
678
/* Remove a breakpoint or watchpoint of TYPE (Z-packet type digit) at
   ADDR covering LEN bytes.  Inverse of x86_insert_point; same return
   convention.  */

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      {
	enum target_hw_bp_type hw_type = Z_packet_to_hw_type (type);
	struct i386_debug_reg_state *state
	  = &proc->private->arch_private->debug_reg_state;

	return i386_low_remove_watchpoint (state, hw_type, addr, len);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
712
713static int
714x86_stopped_by_watchpoint (void)
715{
716 struct process_info *proc = current_process ();
717 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
718}
719
720static CORE_ADDR
721x86_stopped_data_address (void)
722{
723 struct process_info *proc = current_process ();
724 CORE_ADDR addr;
725 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
726 &addr))
727 return addr;
728 return 0;
729}
730\f
731/* Called when a new process is created. */
732
733static struct arch_process_info *
734x86_linux_new_process (void)
735{
736 struct arch_process_info *info = xcalloc (1, sizeof (*info));
737
738 i386_low_init_dregs (&info->debug_reg_state);
739
740 return info;
741}
742
743/* Called when a new thread is detected. */
744
745static struct arch_lwp_info *
746x86_linux_new_thread (void)
747{
748 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
749
750 info->debug_registers_changed = 1;
751
752 return info;
753}
754
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      /* Write only the address registers that are actually in use;
	 DR_CONTROL is written afterwards so the addresses are valid
	 by the time the control bits enable them.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
792\f
d0722149
DE
793/* When GDBSERVER is built as a 64-bit application on linux, the
794 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
795 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
796 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
797 conversion in-place ourselves. */
798
799/* These types below (compat_*) define a siginfo type that is layout
800 compatible with the siginfo type exported by the 32-bit userspace
801 support. */
802
803#ifdef __x86_64__
804
805typedef int compat_int_t;
806typedef unsigned int compat_uptr_t;
807
808typedef int compat_time_t;
809typedef int compat_timer_t;
810typedef int compat_clock_t;
811
812struct compat_timeval
813{
814 compat_time_t tv_sec;
815 int tv_usec;
816};
817
818typedef union compat_sigval
819{
820 compat_int_t sival_int;
821 compat_uptr_t sival_ptr;
822} compat_sigval_t;
823
824typedef struct compat_siginfo
825{
826 int si_signo;
827 int si_errno;
828 int si_code;
829
830 union
831 {
832 int _pad[((128 / sizeof (int)) - 3)];
833
834 /* kill() */
835 struct
836 {
837 unsigned int _pid;
838 unsigned int _uid;
839 } _kill;
840
841 /* POSIX.1b timers */
842 struct
843 {
844 compat_timer_t _tid;
845 int _overrun;
846 compat_sigval_t _sigval;
847 } _timer;
848
849 /* POSIX.1b signals */
850 struct
851 {
852 unsigned int _pid;
853 unsigned int _uid;
854 compat_sigval_t _sigval;
855 } _rt;
856
857 /* SIGCHLD */
858 struct
859 {
860 unsigned int _pid;
861 unsigned int _uid;
862 int _status;
863 compat_clock_t _utime;
864 compat_clock_t _stime;
865 } _sigchld;
866
867 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
868 struct
869 {
870 unsigned int _addr;
871 } _sigfault;
872
873 /* SIGPOLL */
874 struct
875 {
876 int _band;
877 int _fd;
878 } _sigpoll;
879 } _sifields;
880} compat_siginfo_t;
881
c92b5177
L
882/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
883typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
884
885typedef struct compat_x32_siginfo
886{
887 int si_signo;
888 int si_errno;
889 int si_code;
890
891 union
892 {
893 int _pad[((128 / sizeof (int)) - 3)];
894
895 /* kill() */
896 struct
897 {
898 unsigned int _pid;
899 unsigned int _uid;
900 } _kill;
901
902 /* POSIX.1b timers */
903 struct
904 {
905 compat_timer_t _tid;
906 int _overrun;
907 compat_sigval_t _sigval;
908 } _timer;
909
910 /* POSIX.1b signals */
911 struct
912 {
913 unsigned int _pid;
914 unsigned int _uid;
915 compat_sigval_t _sigval;
916 } _rt;
917
918 /* SIGCHLD */
919 struct
920 {
921 unsigned int _pid;
922 unsigned int _uid;
923 int _status;
924 compat_x32_clock_t _utime;
925 compat_x32_clock_t _stime;
926 } _sigchld;
927
928 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
929 struct
930 {
931 unsigned int _addr;
932 } _sigfault;
933
934 /* SIGPOLL */
935 struct
936 {
937 int _band;
938 int _fd;
939 } _sigpoll;
940 } _sifields;
941} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
942
d0722149
DE
943#define cpt_si_pid _sifields._kill._pid
944#define cpt_si_uid _sifields._kill._uid
945#define cpt_si_timerid _sifields._timer._tid
946#define cpt_si_overrun _sifields._timer._overrun
947#define cpt_si_status _sifields._sigchld._status
948#define cpt_si_utime _sifields._sigchld._utime
949#define cpt_si_stime _sifields._sigchld._stime
950#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
951#define cpt_si_addr _sifields._sigfault._addr
952#define cpt_si_band _sifields._sigpoll._band
953#define cpt_si_fd _sifields._sigpoll._fd
954
955/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
956 In their place is si_timer1,si_timer2. */
957#ifndef si_timerid
958#define si_timerid si_timer1
959#endif
960#ifndef si_overrun
961#define si_overrun si_timer2
962#endif
963
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO.  The meaningful _sifields member is selected from
   si_code (SI_TIMER, SI_USER, negative codes for sigqueue) and
   otherwise from si_signo, mirroring the kernel's own dispatch.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: signal queued with a value payload.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1019
/* Convert the 32-bit compat siginfo FROM into the native 64-bit
   siginfo TO.  Inverse of compat_siginfo_from_siginfo; uses the same
   si_code/si_signo dispatch.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: signal queued with a value payload.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1075
c92b5177
L
/* Convert the native siginfo FROM into the x32 compat layout TO (x32
   differs from plain 32-bit compat only in the clock_t fields'
   size/alignment).  Same si_code/si_signo dispatch as
   compat_siginfo_from_siginfo.  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: signal queued with a value payload.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1132
/* Convert the x32 compat siginfo FROM into the native siginfo TO.
   Inverse of compat_x32_siginfo_from_siginfo.  */

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: signal queued with a value payload.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1189
d0722149
DE
1190#endif /* __x86_64__ */
1191
1192/* Convert a native/host siginfo object, into/from the siginfo in the
1193 layout of the inferiors' architecture. Returns true if any
1194 conversion was done; false otherwise. If DIRECTION is 1, then copy
1195 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1196 INF. */
1197
1198static int
a5362b9a 1199x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
d0722149
DE
1200{
1201#ifdef __x86_64__
760256f9 1202 unsigned int machine;
d86d4aaf 1203 int tid = lwpid_of (current_inferior);
760256f9
PA
1204 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1205
d0722149 1206 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 1207 if (!is_64bit_tdesc ())
d0722149 1208 {
a5362b9a 1209 if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
9f1036c1 1210 fatal ("unexpected difference in siginfo");
d0722149
DE
1211
1212 if (direction == 0)
1213 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1214 else
1215 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1216
c92b5177
L
1217 return 1;
1218 }
1219 /* No fixup for native x32 GDB. */
760256f9 1220 else if (!is_elf64 && sizeof (void *) == 8)
c92b5177
L
1221 {
1222 if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
1223 fatal ("unexpected difference in siginfo");
1224
1225 if (direction == 0)
1226 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1227 native);
1228 else
1229 siginfo_from_compat_x32_siginfo (native,
1230 (struct compat_x32_siginfo *) inf);
1231
d0722149
DE
1232 return 1;
1233 }
1234#endif
1235
1236 return 0;
1237}
1238\f
1570b33e
L
1239static int use_xml;
1240
3aee8918
PA
1241/* Format of XSAVE extended state is:
1242 struct
1243 {
1244 fxsave_bytes[0..463]
1245 sw_usable_bytes[464..511]
1246 xstate_hdr_bytes[512..575]
1247 avx_bytes[576..831]
1248 future_state etc
1249 };
1250
1251 Same memory layout will be used for the coredump NT_X86_XSTATE
1252 representing the XSAVE extended state registers.
1253
1254 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1255 extended state mask, which is the same as the extended control register
1256 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1257 together with the mask saved in the xstate_hdr_bytes to determine what
1258 states the processor/OS supports and what state, used or initialized,
1259 the process/thread is in. */
1260#define I386_LINUX_XSAVE_XCR0_OFFSET 464
1261
1262/* Does the current host support the GETFPXREGS request? The header
1263 file may or may not define it, and even if it is defined, the
1264 kernel will return EIO if it's running on a pre-SSE processor. */
1265int have_ptrace_getfpxregs =
1266#ifdef HAVE_PTRACE_GETFPXREGS
1267 -1
1268#else
1269 0
1270#endif
1271;
1570b33e 1272
3aee8918
PA
1273/* Does the current host support PTRACE_GETREGSET? */
1274static int have_ptrace_getregset = -1;
1275
1276/* Get Linux/x86 target description from running target. */
1277
1278static const struct target_desc *
1279x86_linux_read_description (void)
1570b33e 1280{
3aee8918
PA
1281 unsigned int machine;
1282 int is_elf64;
a196ebeb 1283 int xcr0_features;
3aee8918
PA
1284 int tid;
1285 static uint64_t xcr0;
3a13a53b 1286 struct regset_info *regset;
1570b33e 1287
d86d4aaf 1288 tid = lwpid_of (current_inferior);
1570b33e 1289
3aee8918 1290 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 1291
3aee8918 1292 if (sizeof (void *) == 4)
3a13a53b 1293 {
3aee8918
PA
1294 if (is_elf64 > 0)
1295 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1296#ifndef __x86_64__
1297 else if (machine == EM_X86_64)
1298 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1299#endif
1300 }
3a13a53b 1301
3aee8918
PA
1302#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1303 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1304 {
1305 elf_fpxregset_t fpxregs;
3a13a53b 1306
3aee8918 1307 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 1308 {
3aee8918
PA
1309 have_ptrace_getfpxregs = 0;
1310 have_ptrace_getregset = 0;
1311 return tdesc_i386_mmx_linux;
3a13a53b 1312 }
3aee8918
PA
1313 else
1314 have_ptrace_getfpxregs = 1;
3a13a53b 1315 }
1570b33e
L
1316#endif
1317
1318 if (!use_xml)
1319 {
3aee8918
PA
1320 x86_xcr0 = I386_XSTATE_SSE_MASK;
1321
1570b33e
L
1322 /* Don't use XML. */
1323#ifdef __x86_64__
3aee8918
PA
1324 if (machine == EM_X86_64)
1325 return tdesc_amd64_linux_no_xml;
1570b33e 1326 else
1570b33e 1327#endif
3aee8918 1328 return tdesc_i386_linux_no_xml;
1570b33e
L
1329 }
1330
1570b33e
L
1331 if (have_ptrace_getregset == -1)
1332 {
3aee8918 1333 uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 1334 struct iovec iov;
1570b33e
L
1335
1336 iov.iov_base = xstateregs;
1337 iov.iov_len = sizeof (xstateregs);
1338
1339 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
1340 if (ptrace (PTRACE_GETREGSET, tid,
1341 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1342 have_ptrace_getregset = 0;
1343 else
1570b33e 1344 {
3aee8918
PA
1345 have_ptrace_getregset = 1;
1346
1347 /* Get XCR0 from XSAVE extended state. */
1348 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1349 / sizeof (uint64_t))];
1350
1351 /* Use PTRACE_GETREGSET if it is available. */
1352 for (regset = x86_regsets;
1353 regset->fill_function != NULL; regset++)
1354 if (regset->get_request == PTRACE_GETREGSET)
1355 regset->size = I386_XSTATE_SIZE (xcr0);
1356 else if (regset->type != GENERAL_REGS)
1357 regset->size = 0;
1570b33e 1358 }
1570b33e
L
1359 }
1360
3aee8918 1361 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb
WT
1362 xcr0_features = (have_ptrace_getregset
1363 && (xcr0 & I386_XSTATE_ALL_MASK));
3aee8918 1364
a196ebeb 1365 if (xcr0_features)
3aee8918 1366 x86_xcr0 = xcr0;
1570b33e 1367
3aee8918
PA
1368 if (machine == EM_X86_64)
1369 {
1570b33e 1370#ifdef __x86_64__
a196ebeb 1371 if (is_elf64)
3aee8918 1372 {
a196ebeb
WT
1373 if (xcr0_features)
1374 {
1375 switch (xcr0 & I386_XSTATE_ALL_MASK)
1376 {
01f9f808
MS
1377 case I386_XSTATE_AVX512_MASK:
1378 return tdesc_amd64_avx512_linux;
1379
a196ebeb
WT
1380 case I386_XSTATE_MPX_MASK:
1381 return tdesc_amd64_mpx_linux;
1382
1383 case I386_XSTATE_AVX_MASK:
1384 return tdesc_amd64_avx_linux;
1385
1386 default:
1387 return tdesc_amd64_linux;
1388 }
1389 }
4d47af5c 1390 else
a196ebeb 1391 return tdesc_amd64_linux;
3aee8918
PA
1392 }
1393 else
1394 {
a196ebeb
WT
1395 if (xcr0_features)
1396 {
1397 switch (xcr0 & I386_XSTATE_ALL_MASK)
1398 {
01f9f808
MS
1399 case I386_XSTATE_AVX512_MASK:
1400 return tdesc_x32_avx512_linux;
1401
a196ebeb
WT
1402 case I386_XSTATE_MPX_MASK: /* No MPX on x32. */
1403 case I386_XSTATE_AVX_MASK:
1404 return tdesc_x32_avx_linux;
1405
1406 default:
1407 return tdesc_x32_linux;
1408 }
1409 }
3aee8918 1410 else
a196ebeb 1411 return tdesc_x32_linux;
1570b33e 1412 }
3aee8918 1413#endif
1570b33e 1414 }
3aee8918
PA
1415 else
1416 {
a196ebeb
WT
1417 if (xcr0_features)
1418 {
1419 switch (xcr0 & I386_XSTATE_ALL_MASK)
1420 {
01f9f808
MS
1421 case (I386_XSTATE_AVX512_MASK):
1422 return tdesc_i386_avx512_linux;
1423
a196ebeb
WT
1424 case (I386_XSTATE_MPX_MASK):
1425 return tdesc_i386_mpx_linux;
1426
1427 case (I386_XSTATE_AVX_MASK):
1428 return tdesc_i386_avx_linux;
1429
1430 default:
1431 return tdesc_i386_linux;
1432 }
1433 }
3aee8918
PA
1434 else
1435 return tdesc_i386_linux;
1436 }
1437
1438 gdb_assert_not_reached ("failed to return tdesc");
1439}
1440
1441/* Callback for find_inferior. Stops iteration when a thread with a
1442 given PID is found. */
1443
1444static int
1445same_process_callback (struct inferior_list_entry *entry, void *data)
1446{
1447 int pid = *(int *) data;
1448
1449 return (ptid_get_pid (entry->id) == pid);
1450}
1451
1452/* Callback for for_each_inferior. Calls the arch_setup routine for
1453 each process. */
1454
1455static void
1456x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1457{
1458 int pid = ptid_get_pid (entry->id);
1459
1460 /* Look up any thread of this processes. */
1461 current_inferior
1462 = (struct thread_info *) find_inferior (&all_threads,
1463 same_process_callback, &pid);
1464
1465 the_low_target.arch_setup ();
1466}
1467
1468/* Update all the target description of all processes; a new GDB
1469 connected, and it may or not support xml target descriptions. */
1470
1471static void
1472x86_linux_update_xmltarget (void)
1473{
1474 struct thread_info *save_inferior = current_inferior;
1475
1476 /* Before changing the register cache's internal layout, flush the
1477 contents of the current valid caches back to the threads, and
1478 release the current regcache objects. */
1479 regcache_release ();
1480
1481 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1482
1483 current_inferior = save_inferior;
1570b33e
L
1484}
1485
1486/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1487 PTRACE_GETREGSET. */
1488
1489static void
1490x86_linux_process_qsupported (const char *query)
1491{
1492 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1493 with "i386" in qSupported query, it supports x86 XML target
1494 descriptions. */
1495 use_xml = 0;
1496 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1497 {
1498 char *copy = xstrdup (query + 13);
1499 char *p;
1500
1501 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1502 {
1503 if (strcmp (p, "i386") == 0)
1504 {
1505 use_xml = 1;
1506 break;
1507 }
1508 }
1509
1510 free (copy);
1511 }
1512
1513 x86_linux_update_xmltarget ();
1514}
1515
3aee8918 1516/* Common for x86/x86-64. */
d0722149 1517
3aee8918
PA
1518static struct regsets_info x86_regsets_info =
1519 {
1520 x86_regsets, /* regsets */
1521 0, /* num_regsets */
1522 NULL, /* disabled_regsets */
1523 };
214d508e
L
1524
1525#ifdef __x86_64__
3aee8918
PA
1526static struct regs_info amd64_linux_regs_info =
1527 {
1528 NULL, /* regset_bitmap */
1529 NULL, /* usrregs_info */
1530 &x86_regsets_info
1531 };
d0722149 1532#endif
3aee8918
PA
1533static struct usrregs_info i386_linux_usrregs_info =
1534 {
1535 I386_NUM_REGS,
1536 i386_regmap,
1537 };
d0722149 1538
3aee8918
PA
1539static struct regs_info i386_linux_regs_info =
1540 {
1541 NULL, /* regset_bitmap */
1542 &i386_linux_usrregs_info,
1543 &x86_regsets_info
1544 };
d0722149 1545
3aee8918
PA
1546const struct regs_info *
1547x86_linux_regs_info (void)
1548{
1549#ifdef __x86_64__
1550 if (is_64bit_tdesc ())
1551 return &amd64_linux_regs_info;
1552 else
1553#endif
1554 return &i386_linux_regs_info;
1555}
d0722149 1556
3aee8918
PA
1557/* Initialize the target description for the architecture of the
1558 inferior. */
1570b33e 1559
3aee8918
PA
1560static void
1561x86_arch_setup (void)
1562{
1563 current_process ()->tdesc = x86_linux_read_description ();
d0722149
DE
1564}
1565
219f2f23
PA
/* Tracepoints are supported unconditionally on x86 and x86-64.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1571
fa593d66
PA
1572static void
1573append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1574{
1575 write_inferior_memory (*to, buf, len);
1576 *to += len;
1577}
1578
/* Decode OP, a string of whitespace-separated hexadecimal byte values
   such as "48 89 04 24", into BUF.  Decoding stops at the first token
   that is not a hex number (including the terminating NUL).  Returns
   the number of bytes stored in BUF; the caller must ensure BUF is
   large enough.  OP is now const-qualified: callers pass string
   literals, which this function never modifies.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No digits consumed: end of the opcode string.  */
      if (endptr == op)
	break;

      /* Each token encodes a single byte; truncate explicitly.  */
      *buf++ = (unsigned char) ul;
      op = endptr;
    }

  return buf - buf_org;
}
1598
1599#ifdef __x86_64__
1600
1601/* Build a jump pad that saves registers and calls a collection
1602 function. Writes a jump instruction to the jump pad to
1603 JJUMPAD_INSN. The caller is responsible to write it in at the
1604 tracepoint address. */
1605
1606static int
1607amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1608 CORE_ADDR collector,
1609 CORE_ADDR lockaddr,
1610 ULONGEST orig_size,
1611 CORE_ADDR *jump_entry,
405f8e94
SS
1612 CORE_ADDR *trampoline,
1613 ULONGEST *trampoline_size,
fa593d66
PA
1614 unsigned char *jjump_pad_insn,
1615 ULONGEST *jjump_pad_insn_size,
1616 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1617 CORE_ADDR *adjusted_insn_addr_end,
1618 char *err)
fa593d66
PA
1619{
1620 unsigned char buf[40];
1621 int i, offset;
f4647387
YQ
1622 int64_t loffset;
1623
fa593d66
PA
1624 CORE_ADDR buildaddr = *jump_entry;
1625
1626 /* Build the jump pad. */
1627
1628 /* First, do tracepoint data collection. Save registers. */
1629 i = 0;
1630 /* Need to ensure stack pointer saved first. */
1631 buf[i++] = 0x54; /* push %rsp */
1632 buf[i++] = 0x55; /* push %rbp */
1633 buf[i++] = 0x57; /* push %rdi */
1634 buf[i++] = 0x56; /* push %rsi */
1635 buf[i++] = 0x52; /* push %rdx */
1636 buf[i++] = 0x51; /* push %rcx */
1637 buf[i++] = 0x53; /* push %rbx */
1638 buf[i++] = 0x50; /* push %rax */
1639 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1640 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1641 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1642 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1643 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1644 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1645 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1646 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1647 buf[i++] = 0x9c; /* pushfq */
1648 buf[i++] = 0x48; /* movl <addr>,%rdi */
1649 buf[i++] = 0xbf;
1650 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1651 i += sizeof (unsigned long);
1652 buf[i++] = 0x57; /* push %rdi */
1653 append_insns (&buildaddr, i, buf);
1654
1655 /* Stack space for the collecting_t object. */
1656 i = 0;
1657 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1658 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1659 memcpy (buf + i, &tpoint, 8);
1660 i += 8;
1661 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1662 i += push_opcode (&buf[i],
1663 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1664 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1665 append_insns (&buildaddr, i, buf);
1666
1667 /* spin-lock. */
1668 i = 0;
1669 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1670 memcpy (&buf[i], (void *) &lockaddr, 8);
1671 i += 8;
1672 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1673 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1674 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1675 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1676 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1677 append_insns (&buildaddr, i, buf);
1678
1679 /* Set up the gdb_collect call. */
1680 /* At this point, (stack pointer + 0x18) is the base of our saved
1681 register block. */
1682
1683 i = 0;
1684 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1685 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1686
1687 /* tpoint address may be 64-bit wide. */
1688 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1689 memcpy (buf + i, &tpoint, 8);
1690 i += 8;
1691 append_insns (&buildaddr, i, buf);
1692
1693 /* The collector function being in the shared library, may be
1694 >31-bits away off the jump pad. */
1695 i = 0;
1696 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1697 memcpy (buf + i, &collector, 8);
1698 i += 8;
1699 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1700 append_insns (&buildaddr, i, buf);
1701
1702 /* Clear the spin-lock. */
1703 i = 0;
1704 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1705 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1706 memcpy (buf + i, &lockaddr, 8);
1707 i += 8;
1708 append_insns (&buildaddr, i, buf);
1709
1710 /* Remove stack that had been used for the collect_t object. */
1711 i = 0;
1712 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1713 append_insns (&buildaddr, i, buf);
1714
1715 /* Restore register state. */
1716 i = 0;
1717 buf[i++] = 0x48; /* add $0x8,%rsp */
1718 buf[i++] = 0x83;
1719 buf[i++] = 0xc4;
1720 buf[i++] = 0x08;
1721 buf[i++] = 0x9d; /* popfq */
1722 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1723 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1724 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1725 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1726 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1727 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1728 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1729 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1730 buf[i++] = 0x58; /* pop %rax */
1731 buf[i++] = 0x5b; /* pop %rbx */
1732 buf[i++] = 0x59; /* pop %rcx */
1733 buf[i++] = 0x5a; /* pop %rdx */
1734 buf[i++] = 0x5e; /* pop %rsi */
1735 buf[i++] = 0x5f; /* pop %rdi */
1736 buf[i++] = 0x5d; /* pop %rbp */
1737 buf[i++] = 0x5c; /* pop %rsp */
1738 append_insns (&buildaddr, i, buf);
1739
1740 /* Now, adjust the original instruction to execute in the jump
1741 pad. */
1742 *adjusted_insn_addr = buildaddr;
1743 relocate_instruction (&buildaddr, tpaddr);
1744 *adjusted_insn_addr_end = buildaddr;
1745
1746 /* Finally, write a jump back to the program. */
f4647387
YQ
1747
1748 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1749 if (loffset > INT_MAX || loffset < INT_MIN)
1750 {
1751 sprintf (err,
1752 "E.Jump back from jump pad too far from tracepoint "
1753 "(offset 0x%" PRIx64 " > int32).", loffset);
1754 return 1;
1755 }
1756
1757 offset = (int) loffset;
fa593d66
PA
1758 memcpy (buf, jump_insn, sizeof (jump_insn));
1759 memcpy (buf + 1, &offset, 4);
1760 append_insns (&buildaddr, sizeof (jump_insn), buf);
1761
1762 /* The jump pad is now built. Wire in a jump to our jump pad. This
1763 is always done last (by our caller actually), so that we can
1764 install fast tracepoints with threads running. This relies on
1765 the agent's atomic write support. */
f4647387
YQ
1766 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1767 if (loffset > INT_MAX || loffset < INT_MIN)
1768 {
1769 sprintf (err,
1770 "E.Jump pad too far from tracepoint "
1771 "(offset 0x%" PRIx64 " > int32).", loffset);
1772 return 1;
1773 }
1774
1775 offset = (int) loffset;
1776
fa593d66
PA
1777 memcpy (buf, jump_insn, sizeof (jump_insn));
1778 memcpy (buf + 1, &offset, 4);
1779 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1780 *jjump_pad_insn_size = sizeof (jump_insn);
1781
1782 /* Return the end address of our pad. */
1783 *jump_entry = buildaddr;
1784
1785 return 0;
1786}
1787
1788#endif /* __x86_64__ */
1789
1790/* Build a jump pad that saves registers and calls a collection
1791 function. Writes a jump instruction to the jump pad to
1792 JJUMPAD_INSN. The caller is responsible to write it in at the
1793 tracepoint address. */
1794
1795static int
1796i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1797 CORE_ADDR collector,
1798 CORE_ADDR lockaddr,
1799 ULONGEST orig_size,
1800 CORE_ADDR *jump_entry,
405f8e94
SS
1801 CORE_ADDR *trampoline,
1802 ULONGEST *trampoline_size,
fa593d66
PA
1803 unsigned char *jjump_pad_insn,
1804 ULONGEST *jjump_pad_insn_size,
1805 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1806 CORE_ADDR *adjusted_insn_addr_end,
1807 char *err)
fa593d66
PA
1808{
1809 unsigned char buf[0x100];
1810 int i, offset;
1811 CORE_ADDR buildaddr = *jump_entry;
1812
1813 /* Build the jump pad. */
1814
1815 /* First, do tracepoint data collection. Save registers. */
1816 i = 0;
1817 buf[i++] = 0x60; /* pushad */
1818 buf[i++] = 0x68; /* push tpaddr aka $pc */
1819 *((int *)(buf + i)) = (int) tpaddr;
1820 i += 4;
1821 buf[i++] = 0x9c; /* pushf */
1822 buf[i++] = 0x1e; /* push %ds */
1823 buf[i++] = 0x06; /* push %es */
1824 buf[i++] = 0x0f; /* push %fs */
1825 buf[i++] = 0xa0;
1826 buf[i++] = 0x0f; /* push %gs */
1827 buf[i++] = 0xa8;
1828 buf[i++] = 0x16; /* push %ss */
1829 buf[i++] = 0x0e; /* push %cs */
1830 append_insns (&buildaddr, i, buf);
1831
1832 /* Stack space for the collecting_t object. */
1833 i = 0;
1834 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1835
1836 /* Build the object. */
1837 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1838 memcpy (buf + i, &tpoint, 4);
1839 i += 4;
1840 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1841
1842 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1843 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1844 append_insns (&buildaddr, i, buf);
1845
1846 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1847 If we cared for it, this could be using xchg alternatively. */
1848
1849 i = 0;
1850 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1851 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1852 %esp,<lockaddr> */
1853 memcpy (&buf[i], (void *) &lockaddr, 4);
1854 i += 4;
1855 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1856 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1857 append_insns (&buildaddr, i, buf);
1858
1859
1860 /* Set up arguments to the gdb_collect call. */
1861 i = 0;
1862 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1863 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1864 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1865 append_insns (&buildaddr, i, buf);
1866
1867 i = 0;
1868 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1869 append_insns (&buildaddr, i, buf);
1870
1871 i = 0;
1872 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1873 memcpy (&buf[i], (void *) &tpoint, 4);
1874 i += 4;
1875 append_insns (&buildaddr, i, buf);
1876
1877 buf[0] = 0xe8; /* call <reladdr> */
1878 offset = collector - (buildaddr + sizeof (jump_insn));
1879 memcpy (buf + 1, &offset, 4);
1880 append_insns (&buildaddr, 5, buf);
1881 /* Clean up after the call. */
1882 buf[0] = 0x83; /* add $0x8,%esp */
1883 buf[1] = 0xc4;
1884 buf[2] = 0x08;
1885 append_insns (&buildaddr, 3, buf);
1886
1887
1888 /* Clear the spin-lock. This would need the LOCK prefix on older
1889 broken archs. */
1890 i = 0;
1891 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1892 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1893 memcpy (buf + i, &lockaddr, 4);
1894 i += 4;
1895 append_insns (&buildaddr, i, buf);
1896
1897
1898 /* Remove stack that had been used for the collect_t object. */
1899 i = 0;
1900 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1901 append_insns (&buildaddr, i, buf);
1902
1903 i = 0;
1904 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1905 buf[i++] = 0xc4;
1906 buf[i++] = 0x04;
1907 buf[i++] = 0x17; /* pop %ss */
1908 buf[i++] = 0x0f; /* pop %gs */
1909 buf[i++] = 0xa9;
1910 buf[i++] = 0x0f; /* pop %fs */
1911 buf[i++] = 0xa1;
1912 buf[i++] = 0x07; /* pop %es */
405f8e94 1913 buf[i++] = 0x1f; /* pop %ds */
fa593d66
PA
1914 buf[i++] = 0x9d; /* popf */
1915 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1916 buf[i++] = 0xc4;
1917 buf[i++] = 0x04;
1918 buf[i++] = 0x61; /* popad */
1919 append_insns (&buildaddr, i, buf);
1920
1921 /* Now, adjust the original instruction to execute in the jump
1922 pad. */
1923 *adjusted_insn_addr = buildaddr;
1924 relocate_instruction (&buildaddr, tpaddr);
1925 *adjusted_insn_addr_end = buildaddr;
1926
1927 /* Write the jump back to the program. */
1928 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1929 memcpy (buf, jump_insn, sizeof (jump_insn));
1930 memcpy (buf + 1, &offset, 4);
1931 append_insns (&buildaddr, sizeof (jump_insn), buf);
1932
1933 /* The jump pad is now built. Wire in a jump to our jump pad. This
1934 is always done last (by our caller actually), so that we can
1935 install fast tracepoints with threads running. This relies on
1936 the agent's atomic write support. */
405f8e94
SS
1937 if (orig_size == 4)
1938 {
1939 /* Create a trampoline. */
1940 *trampoline_size = sizeof (jump_insn);
1941 if (!claim_trampoline_space (*trampoline_size, trampoline))
1942 {
1943 /* No trampoline space available. */
1944 strcpy (err,
1945 "E.Cannot allocate trampoline space needed for fast "
1946 "tracepoints on 4-byte instructions.");
1947 return 1;
1948 }
1949
1950 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1951 memcpy (buf, jump_insn, sizeof (jump_insn));
1952 memcpy (buf + 1, &offset, 4);
1953 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1954
1955 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1956 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1957 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1958 memcpy (buf + 2, &offset, 2);
1959 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1960 *jjump_pad_insn_size = sizeof (small_jump_insn);
1961 }
1962 else
1963 {
1964 /* Else use a 32-bit relative jump instruction. */
1965 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1966 memcpy (buf, jump_insn, sizeof (jump_insn));
1967 memcpy (buf + 1, &offset, 4);
1968 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1969 *jjump_pad_insn_size = sizeof (jump_insn);
1970 }
fa593d66
PA
1971
1972 /* Return the end address of our pad. */
1973 *jump_entry = buildaddr;
1974
1975 return 0;
1976}
1977
1978static int
1979x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1980 CORE_ADDR collector,
1981 CORE_ADDR lockaddr,
1982 ULONGEST orig_size,
1983 CORE_ADDR *jump_entry,
405f8e94
SS
1984 CORE_ADDR *trampoline,
1985 ULONGEST *trampoline_size,
fa593d66
PA
1986 unsigned char *jjump_pad_insn,
1987 ULONGEST *jjump_pad_insn_size,
1988 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1989 CORE_ADDR *adjusted_insn_addr_end,
1990 char *err)
fa593d66
PA
1991{
1992#ifdef __x86_64__
3aee8918 1993 if (is_64bit_tdesc ())
fa593d66
PA
1994 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1995 collector, lockaddr,
1996 orig_size, jump_entry,
405f8e94 1997 trampoline, trampoline_size,
fa593d66
PA
1998 jjump_pad_insn,
1999 jjump_pad_insn_size,
2000 adjusted_insn_addr,
405f8e94
SS
2001 adjusted_insn_addr_end,
2002 err);
fa593d66
PA
2003#endif
2004
2005 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2006 collector, lockaddr,
2007 orig_size, jump_entry,
405f8e94 2008 trampoline, trampoline_size,
fa593d66
PA
2009 jjump_pad_insn,
2010 jjump_pad_insn_size,
2011 adjusted_insn_addr,
405f8e94
SS
2012 adjusted_insn_addr_end,
2013 err);
2014}
2015
2016/* Return the minimum instruction length for fast tracepoints on x86/x86-64
2017 architectures. */
2018
2019static int
2020x86_get_min_fast_tracepoint_insn_len (void)
2021{
2022 static int warned_about_fast_tracepoints = 0;
2023
2024#ifdef __x86_64__
2025 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2026 used for fast tracepoints. */
3aee8918 2027 if (is_64bit_tdesc ())
405f8e94
SS
2028 return 5;
2029#endif
2030
58b4daa5 2031 if (agent_loaded_p ())
405f8e94
SS
2032 {
2033 char errbuf[IPA_BUFSIZ];
2034
2035 errbuf[0] = '\0';
2036
2037 /* On x86, if trampolines are available, then 4-byte jump instructions
2038 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2039 with a 4-byte offset are used instead. */
2040 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2041 return 4;
2042 else
2043 {
2044 /* GDB has no channel to explain to user why a shorter fast
2045 tracepoint is not possible, but at least make GDBserver
2046 mention that something has gone awry. */
2047 if (!warned_about_fast_tracepoints)
2048 {
2049 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2050 warned_about_fast_tracepoints = 1;
2051 }
2052 return 5;
2053 }
2054 }
2055 else
2056 {
2057 /* Indicate that the minimum length is currently unknown since the IPA
2058 has not loaded yet. */
2059 return 0;
2060 }
fa593d66
PA
2061}
2062
6a271cae
PA
2063static void
2064add_insns (unsigned char *start, int len)
2065{
2066 CORE_ADDR buildaddr = current_insn_ptr;
2067
2068 if (debug_threads)
87ce2a04
DE
2069 debug_printf ("Adding %d bytes of insn at %s\n",
2070 len, paddress (buildaddr));
6a271cae
PA
2071
2072 append_insns (&buildaddr, len, start);
2073 current_insn_ptr = buildaddr;
2074}
2075
6a271cae
PA
2076/* Our general strategy for emitting code is to avoid specifying raw
2077 bytes whenever possible, and instead copy a block of inline asm
2078 that is embedded in the function. This is a little messy, because
2079 we need to keep the compiler from discarding what looks like dead
2080 code, plus suppress various warnings. */
2081
9e4344e5
PA
2082#define EMIT_ASM(NAME, INSNS) \
2083 do \
2084 { \
2085 extern unsigned char start_ ## NAME, end_ ## NAME; \
2086 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
493e2a69 2087 __asm__ ("jmp end_" #NAME "\n" \
9e4344e5
PA
2088 "\t" "start_" #NAME ":" \
2089 "\t" INSNS "\n" \
2090 "\t" "end_" #NAME ":"); \
2091 } while (0)
6a271cae
PA
2092
2093#ifdef __x86_64__
2094
2095#define EMIT_ASM32(NAME,INSNS) \
9e4344e5
PA
2096 do \
2097 { \
2098 extern unsigned char start_ ## NAME, end_ ## NAME; \
2099 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2100 __asm__ (".code32\n" \
2101 "\t" "jmp end_" #NAME "\n" \
2102 "\t" "start_" #NAME ":\n" \
2103 "\t" INSNS "\n" \
2104 "\t" "end_" #NAME ":\n" \
2105 ".code64\n"); \
2106 } while (0)
6a271cae
PA
2107
2108#else
2109
2110#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2111
2112#endif
2113
2114#ifdef __x86_64__
2115
2116static void
2117amd64_emit_prologue (void)
2118{
2119 EMIT_ASM (amd64_prologue,
2120 "pushq %rbp\n\t"
2121 "movq %rsp,%rbp\n\t"
2122 "sub $0x20,%rsp\n\t"
2123 "movq %rdi,-8(%rbp)\n\t"
2124 "movq %rsi,-16(%rbp)");
2125}
2126
2127
2128static void
2129amd64_emit_epilogue (void)
2130{
2131 EMIT_ASM (amd64_epilogue,
2132 "movq -16(%rbp),%rdi\n\t"
2133 "movq %rax,(%rdi)\n\t"
2134 "xor %rax,%rax\n\t"
2135 "leave\n\t"
2136 "ret");
2137}
2138
2139static void
2140amd64_emit_add (void)
2141{
2142 EMIT_ASM (amd64_add,
2143 "add (%rsp),%rax\n\t"
2144 "lea 0x8(%rsp),%rsp");
2145}
2146
2147static void
2148amd64_emit_sub (void)
2149{
2150 EMIT_ASM (amd64_sub,
2151 "sub %rax,(%rsp)\n\t"
2152 "pop %rax");
2153}
2154
2155static void
2156amd64_emit_mul (void)
2157{
2158 emit_error = 1;
2159}
2160
2161static void
2162amd64_emit_lsh (void)
2163{
2164 emit_error = 1;
2165}
2166
2167static void
2168amd64_emit_rsh_signed (void)
2169{
2170 emit_error = 1;
2171}
2172
2173static void
2174amd64_emit_rsh_unsigned (void)
2175{
2176 emit_error = 1;
2177}
2178
2179static void
2180amd64_emit_ext (int arg)
2181{
2182 switch (arg)
2183 {
2184 case 8:
2185 EMIT_ASM (amd64_ext_8,
2186 "cbtw\n\t"
2187 "cwtl\n\t"
2188 "cltq");
2189 break;
2190 case 16:
2191 EMIT_ASM (amd64_ext_16,
2192 "cwtl\n\t"
2193 "cltq");
2194 break;
2195 case 32:
2196 EMIT_ASM (amd64_ext_32,
2197 "cltq");
2198 break;
2199 default:
2200 emit_error = 1;
2201 }
2202}
2203
2204static void
2205amd64_emit_log_not (void)
2206{
2207 EMIT_ASM (amd64_log_not,
2208 "test %rax,%rax\n\t"
2209 "sete %cl\n\t"
2210 "movzbq %cl,%rax");
2211}
2212
2213static void
2214amd64_emit_bit_and (void)
2215{
2216 EMIT_ASM (amd64_and,
2217 "and (%rsp),%rax\n\t"
2218 "lea 0x8(%rsp),%rsp");
2219}
2220
2221static void
2222amd64_emit_bit_or (void)
2223{
2224 EMIT_ASM (amd64_or,
2225 "or (%rsp),%rax\n\t"
2226 "lea 0x8(%rsp),%rsp");
2227}
2228
2229static void
2230amd64_emit_bit_xor (void)
2231{
2232 EMIT_ASM (amd64_xor,
2233 "xor (%rsp),%rax\n\t"
2234 "lea 0x8(%rsp),%rsp");
2235}
2236
2237static void
2238amd64_emit_bit_not (void)
2239{
2240 EMIT_ASM (amd64_bit_not,
2241 "xorq $0xffffffffffffffff,%rax");
2242}
2243
2244static void
2245amd64_emit_equal (void)
2246{
2247 EMIT_ASM (amd64_equal,
2248 "cmp %rax,(%rsp)\n\t"
2249 "je .Lamd64_equal_true\n\t"
2250 "xor %rax,%rax\n\t"
2251 "jmp .Lamd64_equal_end\n\t"
2252 ".Lamd64_equal_true:\n\t"
2253 "mov $0x1,%rax\n\t"
2254 ".Lamd64_equal_end:\n\t"
2255 "lea 0x8(%rsp),%rsp");
2256}
2257
2258static void
2259amd64_emit_less_signed (void)
2260{
2261 EMIT_ASM (amd64_less_signed,
2262 "cmp %rax,(%rsp)\n\t"
2263 "jl .Lamd64_less_signed_true\n\t"
2264 "xor %rax,%rax\n\t"
2265 "jmp .Lamd64_less_signed_end\n\t"
2266 ".Lamd64_less_signed_true:\n\t"
2267 "mov $1,%rax\n\t"
2268 ".Lamd64_less_signed_end:\n\t"
2269 "lea 0x8(%rsp),%rsp");
2270}
2271
2272static void
2273amd64_emit_less_unsigned (void)
2274{
2275 EMIT_ASM (amd64_less_unsigned,
2276 "cmp %rax,(%rsp)\n\t"
2277 "jb .Lamd64_less_unsigned_true\n\t"
2278 "xor %rax,%rax\n\t"
2279 "jmp .Lamd64_less_unsigned_end\n\t"
2280 ".Lamd64_less_unsigned_true:\n\t"
2281 "mov $1,%rax\n\t"
2282 ".Lamd64_less_unsigned_end:\n\t"
2283 "lea 0x8(%rsp),%rsp");
2284}
2285
2286static void
2287amd64_emit_ref (int size)
2288{
2289 switch (size)
2290 {
2291 case 1:
2292 EMIT_ASM (amd64_ref1,
2293 "movb (%rax),%al");
2294 break;
2295 case 2:
2296 EMIT_ASM (amd64_ref2,
2297 "movw (%rax),%ax");
2298 break;
2299 case 4:
2300 EMIT_ASM (amd64_ref4,
2301 "movl (%rax),%eax");
2302 break;
2303 case 8:
2304 EMIT_ASM (amd64_ref8,
2305 "movq (%rax),%rax");
2306 break;
2307 }
2308}
2309
2310static void
2311amd64_emit_if_goto (int *offset_p, int *size_p)
2312{
2313 EMIT_ASM (amd64_if_goto,
2314 "mov %rax,%rcx\n\t"
2315 "pop %rax\n\t"
2316 "cmp $0,%rcx\n\t"
2317 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2318 if (offset_p)
2319 *offset_p = 10;
2320 if (size_p)
2321 *size_p = 4;
2322}
2323
2324static void
2325amd64_emit_goto (int *offset_p, int *size_p)
2326{
2327 EMIT_ASM (amd64_goto,
2328 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2329 if (offset_p)
2330 *offset_p = 1;
2331 if (size_p)
2332 *size_p = 4;
2333}
2334
2335static void
2336amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2337{
2338 int diff = (to - (from + size));
2339 unsigned char buf[sizeof (int)];
2340
2341 if (size != 4)
2342 {
2343 emit_error = 1;
2344 return;
2345 }
2346
2347 memcpy (buf, &diff, sizeof (int));
2348 write_inferior_memory (from, buf, sizeof (int));
2349}
2350
2351static void
4e29fb54 2352amd64_emit_const (LONGEST num)
6a271cae
PA
2353{
2354 unsigned char buf[16];
2355 int i;
2356 CORE_ADDR buildaddr = current_insn_ptr;
2357
2358 i = 0;
2359 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 2360 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
2361 i += 8;
2362 append_insns (&buildaddr, i, buf);
2363 current_insn_ptr = buildaddr;
2364}
2365
2366static void
2367amd64_emit_call (CORE_ADDR fn)
2368{
2369 unsigned char buf[16];
2370 int i;
2371 CORE_ADDR buildaddr;
4e29fb54 2372 LONGEST offset64;
6a271cae
PA
2373
2374 /* The destination function being in the shared library, may be
2375 >31-bits away off the compiled code pad. */
2376
2377 buildaddr = current_insn_ptr;
2378
2379 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2380
2381 i = 0;
2382
2383 if (offset64 > INT_MAX || offset64 < INT_MIN)
2384 {
2385 /* Offset is too large for a call. Use callq, but that requires
2386 a register, so avoid it if possible. Use r10, since it is
2387 call-clobbered, we don't have to push/pop it. */
2388 buf[i++] = 0x48; /* mov $fn,%r10 */
2389 buf[i++] = 0xba;
2390 memcpy (buf + i, &fn, 8);
2391 i += 8;
2392 buf[i++] = 0xff; /* callq *%r10 */
2393 buf[i++] = 0xd2;
2394 }
2395 else
2396 {
2397 int offset32 = offset64; /* we know we can't overflow here. */
2398 memcpy (buf + i, &offset32, 4);
2399 i += 4;
2400 }
2401
2402 append_insns (&buildaddr, i, buf);
2403 current_insn_ptr = buildaddr;
2404}
2405
2406static void
2407amd64_emit_reg (int reg)
2408{
2409 unsigned char buf[16];
2410 int i;
2411 CORE_ADDR buildaddr;
2412
2413 /* Assume raw_regs is still in %rdi. */
2414 buildaddr = current_insn_ptr;
2415 i = 0;
2416 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 2417 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2418 i += 4;
2419 append_insns (&buildaddr, i, buf);
2420 current_insn_ptr = buildaddr;
2421 amd64_emit_call (get_raw_reg_func_addr ());
2422}
2423
2424static void
2425amd64_emit_pop (void)
2426{
2427 EMIT_ASM (amd64_pop,
2428 "pop %rax");
2429}
2430
2431static void
2432amd64_emit_stack_flush (void)
2433{
2434 EMIT_ASM (amd64_stack_flush,
2435 "push %rax");
2436}
2437
2438static void
2439amd64_emit_zero_ext (int arg)
2440{
2441 switch (arg)
2442 {
2443 case 8:
2444 EMIT_ASM (amd64_zero_ext_8,
2445 "and $0xff,%rax");
2446 break;
2447 case 16:
2448 EMIT_ASM (amd64_zero_ext_16,
2449 "and $0xffff,%rax");
2450 break;
2451 case 32:
2452 EMIT_ASM (amd64_zero_ext_32,
2453 "mov $0xffffffff,%rcx\n\t"
2454 "and %rcx,%rax");
2455 break;
2456 default:
2457 emit_error = 1;
2458 }
2459}
2460
2461static void
2462amd64_emit_swap (void)
2463{
2464 EMIT_ASM (amd64_swap,
2465 "mov %rax,%rcx\n\t"
2466 "pop %rax\n\t"
2467 "push %rcx");
2468}
2469
2470static void
2471amd64_emit_stack_adjust (int n)
2472{
2473 unsigned char buf[16];
2474 int i;
2475 CORE_ADDR buildaddr = current_insn_ptr;
2476
2477 i = 0;
2478 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2479 buf[i++] = 0x8d;
2480 buf[i++] = 0x64;
2481 buf[i++] = 0x24;
2482 /* This only handles adjustments up to 16, but we don't expect any more. */
2483 buf[i++] = n * 8;
2484 append_insns (&buildaddr, i, buf);
2485 current_insn_ptr = buildaddr;
2486}
2487
2488/* FN's prototype is `LONGEST(*fn)(int)'. */
2489
2490static void
2491amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2492{
2493 unsigned char buf[16];
2494 int i;
2495 CORE_ADDR buildaddr;
2496
2497 buildaddr = current_insn_ptr;
2498 i = 0;
2499 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2500 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2501 i += 4;
2502 append_insns (&buildaddr, i, buf);
2503 current_insn_ptr = buildaddr;
2504 amd64_emit_call (fn);
2505}
2506
4e29fb54 2507/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2508
2509static void
2510amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2511{
2512 unsigned char buf[16];
2513 int i;
2514 CORE_ADDR buildaddr;
2515
2516 buildaddr = current_insn_ptr;
2517 i = 0;
2518 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2519 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2520 i += 4;
2521 append_insns (&buildaddr, i, buf);
2522 current_insn_ptr = buildaddr;
2523 EMIT_ASM (amd64_void_call_2_a,
2524 /* Save away a copy of the stack top. */
2525 "push %rax\n\t"
2526 /* Also pass top as the second argument. */
2527 "mov %rax,%rsi");
2528 amd64_emit_call (fn);
2529 EMIT_ASM (amd64_void_call_2_b,
2530 /* Restore the stack top, %rax may have been trashed. */
2531 "pop %rax");
2532}
2533
6b9801d4
SS
2534void
2535amd64_emit_eq_goto (int *offset_p, int *size_p)
2536{
2537 EMIT_ASM (amd64_eq,
2538 "cmp %rax,(%rsp)\n\t"
2539 "jne .Lamd64_eq_fallthru\n\t"
2540 "lea 0x8(%rsp),%rsp\n\t"
2541 "pop %rax\n\t"
2542 /* jmp, but don't trust the assembler to choose the right jump */
2543 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2544 ".Lamd64_eq_fallthru:\n\t"
2545 "lea 0x8(%rsp),%rsp\n\t"
2546 "pop %rax");
2547
2548 if (offset_p)
2549 *offset_p = 13;
2550 if (size_p)
2551 *size_p = 4;
2552}
2553
2554void
2555amd64_emit_ne_goto (int *offset_p, int *size_p)
2556{
2557 EMIT_ASM (amd64_ne,
2558 "cmp %rax,(%rsp)\n\t"
2559 "je .Lamd64_ne_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2561 "pop %rax\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_ne_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2566 "pop %rax");
2567
2568 if (offset_p)
2569 *offset_p = 13;
2570 if (size_p)
2571 *size_p = 4;
2572}
2573
2574void
2575amd64_emit_lt_goto (int *offset_p, int *size_p)
2576{
2577 EMIT_ASM (amd64_lt,
2578 "cmp %rax,(%rsp)\n\t"
2579 "jnl .Lamd64_lt_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2581 "pop %rax\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_lt_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax");
2587
2588 if (offset_p)
2589 *offset_p = 13;
2590 if (size_p)
2591 *size_p = 4;
2592}
2593
2594void
2595amd64_emit_le_goto (int *offset_p, int *size_p)
2596{
2597 EMIT_ASM (amd64_le,
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnle .Lamd64_le_fallthru\n\t"
2600 "lea 0x8(%rsp),%rsp\n\t"
2601 "pop %rax\n\t"
2602 /* jmp, but don't trust the assembler to choose the right jump */
2603 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2604 ".Lamd64_le_fallthru:\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2606 "pop %rax");
2607
2608 if (offset_p)
2609 *offset_p = 13;
2610 if (size_p)
2611 *size_p = 4;
2612}
2613
2614void
2615amd64_emit_gt_goto (int *offset_p, int *size_p)
2616{
2617 EMIT_ASM (amd64_gt,
2618 "cmp %rax,(%rsp)\n\t"
2619 "jng .Lamd64_gt_fallthru\n\t"
2620 "lea 0x8(%rsp),%rsp\n\t"
2621 "pop %rax\n\t"
2622 /* jmp, but don't trust the assembler to choose the right jump */
2623 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2624 ".Lamd64_gt_fallthru:\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2626 "pop %rax");
2627
2628 if (offset_p)
2629 *offset_p = 13;
2630 if (size_p)
2631 *size_p = 4;
2632}
2633
2634void
2635amd64_emit_ge_goto (int *offset_p, int *size_p)
2636{
2637 EMIT_ASM (amd64_ge,
2638 "cmp %rax,(%rsp)\n\t"
2639 "jnge .Lamd64_ge_fallthru\n\t"
2640 ".Lamd64_ge_jump:\n\t"
2641 "lea 0x8(%rsp),%rsp\n\t"
2642 "pop %rax\n\t"
2643 /* jmp, but don't trust the assembler to choose the right jump */
2644 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2645 ".Lamd64_ge_fallthru:\n\t"
2646 "lea 0x8(%rsp),%rsp\n\t"
2647 "pop %rax");
2648
2649 if (offset_p)
2650 *offset_p = 13;
2651 if (size_p)
2652 *size_p = 4;
2653}
2654
6a271cae
PA
2655struct emit_ops amd64_emit_ops =
2656 {
2657 amd64_emit_prologue,
2658 amd64_emit_epilogue,
2659 amd64_emit_add,
2660 amd64_emit_sub,
2661 amd64_emit_mul,
2662 amd64_emit_lsh,
2663 amd64_emit_rsh_signed,
2664 amd64_emit_rsh_unsigned,
2665 amd64_emit_ext,
2666 amd64_emit_log_not,
2667 amd64_emit_bit_and,
2668 amd64_emit_bit_or,
2669 amd64_emit_bit_xor,
2670 amd64_emit_bit_not,
2671 amd64_emit_equal,
2672 amd64_emit_less_signed,
2673 amd64_emit_less_unsigned,
2674 amd64_emit_ref,
2675 amd64_emit_if_goto,
2676 amd64_emit_goto,
2677 amd64_write_goto_address,
2678 amd64_emit_const,
2679 amd64_emit_call,
2680 amd64_emit_reg,
2681 amd64_emit_pop,
2682 amd64_emit_stack_flush,
2683 amd64_emit_zero_ext,
2684 amd64_emit_swap,
2685 amd64_emit_stack_adjust,
2686 amd64_emit_int_call_1,
6b9801d4
SS
2687 amd64_emit_void_call_2,
2688 amd64_emit_eq_goto,
2689 amd64_emit_ne_goto,
2690 amd64_emit_lt_goto,
2691 amd64_emit_le_goto,
2692 amd64_emit_gt_goto,
2693 amd64_emit_ge_goto
6a271cae
PA
2694 };
2695
2696#endif /* __x86_64__ */
2697
2698static void
2699i386_emit_prologue (void)
2700{
2701 EMIT_ASM32 (i386_prologue,
2702 "push %ebp\n\t"
bf15cbda
SS
2703 "mov %esp,%ebp\n\t"
2704 "push %ebx");
6a271cae
PA
2705 /* At this point, the raw regs base address is at 8(%ebp), and the
2706 value pointer is at 12(%ebp). */
2707}
2708
2709static void
2710i386_emit_epilogue (void)
2711{
2712 EMIT_ASM32 (i386_epilogue,
2713 "mov 12(%ebp),%ecx\n\t"
2714 "mov %eax,(%ecx)\n\t"
2715 "mov %ebx,0x4(%ecx)\n\t"
2716 "xor %eax,%eax\n\t"
bf15cbda 2717 "pop %ebx\n\t"
6a271cae
PA
2718 "pop %ebp\n\t"
2719 "ret");
2720}
2721
2722static void
2723i386_emit_add (void)
2724{
2725 EMIT_ASM32 (i386_add,
2726 "add (%esp),%eax\n\t"
2727 "adc 0x4(%esp),%ebx\n\t"
2728 "lea 0x8(%esp),%esp");
2729}
2730
2731static void
2732i386_emit_sub (void)
2733{
2734 EMIT_ASM32 (i386_sub,
2735 "subl %eax,(%esp)\n\t"
2736 "sbbl %ebx,4(%esp)\n\t"
2737 "pop %eax\n\t"
2738 "pop %ebx\n\t");
2739}
2740
2741static void
2742i386_emit_mul (void)
2743{
2744 emit_error = 1;
2745}
2746
2747static void
2748i386_emit_lsh (void)
2749{
2750 emit_error = 1;
2751}
2752
2753static void
2754i386_emit_rsh_signed (void)
2755{
2756 emit_error = 1;
2757}
2758
2759static void
2760i386_emit_rsh_unsigned (void)
2761{
2762 emit_error = 1;
2763}
2764
2765static void
2766i386_emit_ext (int arg)
2767{
2768 switch (arg)
2769 {
2770 case 8:
2771 EMIT_ASM32 (i386_ext_8,
2772 "cbtw\n\t"
2773 "cwtl\n\t"
2774 "movl %eax,%ebx\n\t"
2775 "sarl $31,%ebx");
2776 break;
2777 case 16:
2778 EMIT_ASM32 (i386_ext_16,
2779 "cwtl\n\t"
2780 "movl %eax,%ebx\n\t"
2781 "sarl $31,%ebx");
2782 break;
2783 case 32:
2784 EMIT_ASM32 (i386_ext_32,
2785 "movl %eax,%ebx\n\t"
2786 "sarl $31,%ebx");
2787 break;
2788 default:
2789 emit_error = 1;
2790 }
2791}
2792
2793static void
2794i386_emit_log_not (void)
2795{
2796 EMIT_ASM32 (i386_log_not,
2797 "or %ebx,%eax\n\t"
2798 "test %eax,%eax\n\t"
2799 "sete %cl\n\t"
2800 "xor %ebx,%ebx\n\t"
2801 "movzbl %cl,%eax");
2802}
2803
2804static void
2805i386_emit_bit_and (void)
2806{
2807 EMIT_ASM32 (i386_and,
2808 "and (%esp),%eax\n\t"
2809 "and 0x4(%esp),%ebx\n\t"
2810 "lea 0x8(%esp),%esp");
2811}
2812
2813static void
2814i386_emit_bit_or (void)
2815{
2816 EMIT_ASM32 (i386_or,
2817 "or (%esp),%eax\n\t"
2818 "or 0x4(%esp),%ebx\n\t"
2819 "lea 0x8(%esp),%esp");
2820}
2821
2822static void
2823i386_emit_bit_xor (void)
2824{
2825 EMIT_ASM32 (i386_xor,
2826 "xor (%esp),%eax\n\t"
2827 "xor 0x4(%esp),%ebx\n\t"
2828 "lea 0x8(%esp),%esp");
2829}
2830
2831static void
2832i386_emit_bit_not (void)
2833{
2834 EMIT_ASM32 (i386_bit_not,
2835 "xor $0xffffffff,%eax\n\t"
2836 "xor $0xffffffff,%ebx\n\t");
2837}
2838
2839static void
2840i386_emit_equal (void)
2841{
2842 EMIT_ASM32 (i386_equal,
2843 "cmpl %ebx,4(%esp)\n\t"
2844 "jne .Li386_equal_false\n\t"
2845 "cmpl %eax,(%esp)\n\t"
2846 "je .Li386_equal_true\n\t"
2847 ".Li386_equal_false:\n\t"
2848 "xor %eax,%eax\n\t"
2849 "jmp .Li386_equal_end\n\t"
2850 ".Li386_equal_true:\n\t"
2851 "mov $1,%eax\n\t"
2852 ".Li386_equal_end:\n\t"
2853 "xor %ebx,%ebx\n\t"
2854 "lea 0x8(%esp),%esp");
2855}
2856
2857static void
2858i386_emit_less_signed (void)
2859{
2860 EMIT_ASM32 (i386_less_signed,
2861 "cmpl %ebx,4(%esp)\n\t"
2862 "jl .Li386_less_signed_true\n\t"
2863 "jne .Li386_less_signed_false\n\t"
2864 "cmpl %eax,(%esp)\n\t"
2865 "jl .Li386_less_signed_true\n\t"
2866 ".Li386_less_signed_false:\n\t"
2867 "xor %eax,%eax\n\t"
2868 "jmp .Li386_less_signed_end\n\t"
2869 ".Li386_less_signed_true:\n\t"
2870 "mov $1,%eax\n\t"
2871 ".Li386_less_signed_end:\n\t"
2872 "xor %ebx,%ebx\n\t"
2873 "lea 0x8(%esp),%esp");
2874}
2875
2876static void
2877i386_emit_less_unsigned (void)
2878{
2879 EMIT_ASM32 (i386_less_unsigned,
2880 "cmpl %ebx,4(%esp)\n\t"
2881 "jb .Li386_less_unsigned_true\n\t"
2882 "jne .Li386_less_unsigned_false\n\t"
2883 "cmpl %eax,(%esp)\n\t"
2884 "jb .Li386_less_unsigned_true\n\t"
2885 ".Li386_less_unsigned_false:\n\t"
2886 "xor %eax,%eax\n\t"
2887 "jmp .Li386_less_unsigned_end\n\t"
2888 ".Li386_less_unsigned_true:\n\t"
2889 "mov $1,%eax\n\t"
2890 ".Li386_less_unsigned_end:\n\t"
2891 "xor %ebx,%ebx\n\t"
2892 "lea 0x8(%esp),%esp");
2893}
2894
2895static void
2896i386_emit_ref (int size)
2897{
2898 switch (size)
2899 {
2900 case 1:
2901 EMIT_ASM32 (i386_ref1,
2902 "movb (%eax),%al");
2903 break;
2904 case 2:
2905 EMIT_ASM32 (i386_ref2,
2906 "movw (%eax),%ax");
2907 break;
2908 case 4:
2909 EMIT_ASM32 (i386_ref4,
2910 "movl (%eax),%eax");
2911 break;
2912 case 8:
2913 EMIT_ASM32 (i386_ref8,
2914 "movl 4(%eax),%ebx\n\t"
2915 "movl (%eax),%eax");
2916 break;
2917 }
2918}
2919
2920static void
2921i386_emit_if_goto (int *offset_p, int *size_p)
2922{
2923 EMIT_ASM32 (i386_if_goto,
2924 "mov %eax,%ecx\n\t"
2925 "or %ebx,%ecx\n\t"
2926 "pop %eax\n\t"
2927 "pop %ebx\n\t"
2928 "cmpl $0,%ecx\n\t"
2929 /* Don't trust the assembler to choose the right jump */
2930 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2931
2932 if (offset_p)
2933 *offset_p = 11; /* be sure that this matches the sequence above */
2934 if (size_p)
2935 *size_p = 4;
2936}
2937
2938static void
2939i386_emit_goto (int *offset_p, int *size_p)
2940{
2941 EMIT_ASM32 (i386_goto,
2942 /* Don't trust the assembler to choose the right jump */
2943 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2944 if (offset_p)
2945 *offset_p = 1;
2946 if (size_p)
2947 *size_p = 4;
2948}
2949
2950static void
2951i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2952{
2953 int diff = (to - (from + size));
2954 unsigned char buf[sizeof (int)];
2955
2956 /* We're only doing 4-byte sizes at the moment. */
2957 if (size != 4)
2958 {
2959 emit_error = 1;
2960 return;
2961 }
2962
2963 memcpy (buf, &diff, sizeof (int));
2964 write_inferior_memory (from, buf, sizeof (int));
2965}
2966
2967static void
4e29fb54 2968i386_emit_const (LONGEST num)
6a271cae
PA
2969{
2970 unsigned char buf[16];
b00ad6ff 2971 int i, hi, lo;
6a271cae
PA
2972 CORE_ADDR buildaddr = current_insn_ptr;
2973
2974 i = 0;
2975 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2976 lo = num & 0xffffffff;
2977 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2978 i += 4;
2979 hi = ((num >> 32) & 0xffffffff);
2980 if (hi)
2981 {
2982 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2983 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2984 i += 4;
2985 }
2986 else
2987 {
2988 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2989 }
2990 append_insns (&buildaddr, i, buf);
2991 current_insn_ptr = buildaddr;
2992}
2993
2994static void
2995i386_emit_call (CORE_ADDR fn)
2996{
2997 unsigned char buf[16];
2998 int i, offset;
2999 CORE_ADDR buildaddr;
3000
3001 buildaddr = current_insn_ptr;
3002 i = 0;
3003 buf[i++] = 0xe8; /* call <reladdr> */
3004 offset = ((int) fn) - (buildaddr + 5);
3005 memcpy (buf + 1, &offset, 4);
3006 append_insns (&buildaddr, 5, buf);
3007 current_insn_ptr = buildaddr;
3008}
3009
3010static void
3011i386_emit_reg (int reg)
3012{
3013 unsigned char buf[16];
3014 int i;
3015 CORE_ADDR buildaddr;
3016
3017 EMIT_ASM32 (i386_reg_a,
3018 "sub $0x8,%esp");
3019 buildaddr = current_insn_ptr;
3020 i = 0;
3021 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff 3022 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
3023 i += 4;
3024 append_insns (&buildaddr, i, buf);
3025 current_insn_ptr = buildaddr;
3026 EMIT_ASM32 (i386_reg_b,
3027 "mov %eax,4(%esp)\n\t"
3028 "mov 8(%ebp),%eax\n\t"
3029 "mov %eax,(%esp)");
3030 i386_emit_call (get_raw_reg_func_addr ());
3031 EMIT_ASM32 (i386_reg_c,
3032 "xor %ebx,%ebx\n\t"
3033 "lea 0x8(%esp),%esp");
3034}
3035
3036static void
3037i386_emit_pop (void)
3038{
3039 EMIT_ASM32 (i386_pop,
3040 "pop %eax\n\t"
3041 "pop %ebx");
3042}
3043
3044static void
3045i386_emit_stack_flush (void)
3046{
3047 EMIT_ASM32 (i386_stack_flush,
3048 "push %ebx\n\t"
3049 "push %eax");
3050}
3051
3052static void
3053i386_emit_zero_ext (int arg)
3054{
3055 switch (arg)
3056 {
3057 case 8:
3058 EMIT_ASM32 (i386_zero_ext_8,
3059 "and $0xff,%eax\n\t"
3060 "xor %ebx,%ebx");
3061 break;
3062 case 16:
3063 EMIT_ASM32 (i386_zero_ext_16,
3064 "and $0xffff,%eax\n\t"
3065 "xor %ebx,%ebx");
3066 break;
3067 case 32:
3068 EMIT_ASM32 (i386_zero_ext_32,
3069 "xor %ebx,%ebx");
3070 break;
3071 default:
3072 emit_error = 1;
3073 }
3074}
3075
3076static void
3077i386_emit_swap (void)
3078{
3079 EMIT_ASM32 (i386_swap,
3080 "mov %eax,%ecx\n\t"
3081 "mov %ebx,%edx\n\t"
3082 "pop %eax\n\t"
3083 "pop %ebx\n\t"
3084 "push %edx\n\t"
3085 "push %ecx");
3086}
3087
3088static void
3089i386_emit_stack_adjust (int n)
3090{
3091 unsigned char buf[16];
3092 int i;
3093 CORE_ADDR buildaddr = current_insn_ptr;
3094
3095 i = 0;
3096 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3097 buf[i++] = 0x64;
3098 buf[i++] = 0x24;
3099 buf[i++] = n * 8;
3100 append_insns (&buildaddr, i, buf);
3101 current_insn_ptr = buildaddr;
3102}
3103
3104/* FN's prototype is `LONGEST(*fn)(int)'. */
3105
3106static void
3107i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3108{
3109 unsigned char buf[16];
3110 int i;
3111 CORE_ADDR buildaddr;
3112
3113 EMIT_ASM32 (i386_int_call_1_a,
3114 /* Reserve a bit of stack space. */
3115 "sub $0x8,%esp");
3116 /* Put the one argument on the stack. */
3117 buildaddr = current_insn_ptr;
3118 i = 0;
3119 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3120 buf[i++] = 0x04;
3121 buf[i++] = 0x24;
b00ad6ff 3122 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3123 i += 4;
3124 append_insns (&buildaddr, i, buf);
3125 current_insn_ptr = buildaddr;
3126 i386_emit_call (fn);
3127 EMIT_ASM32 (i386_int_call_1_c,
3128 "mov %edx,%ebx\n\t"
3129 "lea 0x8(%esp),%esp");
3130}
3131
4e29fb54 3132/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
3133
3134static void
3135i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3136{
3137 unsigned char buf[16];
3138 int i;
3139 CORE_ADDR buildaddr;
3140
3141 EMIT_ASM32 (i386_void_call_2_a,
3142 /* Preserve %eax only; we don't have to worry about %ebx. */
3143 "push %eax\n\t"
3144 /* Reserve a bit of stack space for arguments. */
3145 "sub $0x10,%esp\n\t"
3146 /* Copy "top" to the second argument position. (Note that
3147 we can't assume function won't scribble on its
3148 arguments, so don't try to restore from this.) */
3149 "mov %eax,4(%esp)\n\t"
3150 "mov %ebx,8(%esp)");
3151 /* Put the first argument on the stack. */
3152 buildaddr = current_insn_ptr;
3153 i = 0;
3154 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3155 buf[i++] = 0x04;
3156 buf[i++] = 0x24;
b00ad6ff 3157 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3158 i += 4;
3159 append_insns (&buildaddr, i, buf);
3160 current_insn_ptr = buildaddr;
3161 i386_emit_call (fn);
3162 EMIT_ASM32 (i386_void_call_2_b,
3163 "lea 0x10(%esp),%esp\n\t"
3164 /* Restore original stack top. */
3165 "pop %eax");
3166}
3167
6b9801d4
SS
3168
3169void
3170i386_emit_eq_goto (int *offset_p, int *size_p)
3171{
3172 EMIT_ASM32 (eq,
3173 /* Check low half first, more likely to be decider */
3174 "cmpl %eax,(%esp)\n\t"
3175 "jne .Leq_fallthru\n\t"
3176 "cmpl %ebx,4(%esp)\n\t"
3177 "jne .Leq_fallthru\n\t"
3178 "lea 0x8(%esp),%esp\n\t"
3179 "pop %eax\n\t"
3180 "pop %ebx\n\t"
3181 /* jmp, but don't trust the assembler to choose the right jump */
3182 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3183 ".Leq_fallthru:\n\t"
3184 "lea 0x8(%esp),%esp\n\t"
3185 "pop %eax\n\t"
3186 "pop %ebx");
3187
3188 if (offset_p)
3189 *offset_p = 18;
3190 if (size_p)
3191 *size_p = 4;
3192}
3193
3194void
3195i386_emit_ne_goto (int *offset_p, int *size_p)
3196{
3197 EMIT_ASM32 (ne,
3198 /* Check low half first, more likely to be decider */
3199 "cmpl %eax,(%esp)\n\t"
3200 "jne .Lne_jump\n\t"
3201 "cmpl %ebx,4(%esp)\n\t"
3202 "je .Lne_fallthru\n\t"
3203 ".Lne_jump:\n\t"
3204 "lea 0x8(%esp),%esp\n\t"
3205 "pop %eax\n\t"
3206 "pop %ebx\n\t"
3207 /* jmp, but don't trust the assembler to choose the right jump */
3208 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3209 ".Lne_fallthru:\n\t"
3210 "lea 0x8(%esp),%esp\n\t"
3211 "pop %eax\n\t"
3212 "pop %ebx");
3213
3214 if (offset_p)
3215 *offset_p = 18;
3216 if (size_p)
3217 *size_p = 4;
3218}
3219
3220void
3221i386_emit_lt_goto (int *offset_p, int *size_p)
3222{
3223 EMIT_ASM32 (lt,
3224 "cmpl %ebx,4(%esp)\n\t"
3225 "jl .Llt_jump\n\t"
3226 "jne .Llt_fallthru\n\t"
3227 "cmpl %eax,(%esp)\n\t"
3228 "jnl .Llt_fallthru\n\t"
3229 ".Llt_jump:\n\t"
3230 "lea 0x8(%esp),%esp\n\t"
3231 "pop %eax\n\t"
3232 "pop %ebx\n\t"
3233 /* jmp, but don't trust the assembler to choose the right jump */
3234 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3235 ".Llt_fallthru:\n\t"
3236 "lea 0x8(%esp),%esp\n\t"
3237 "pop %eax\n\t"
3238 "pop %ebx");
3239
3240 if (offset_p)
3241 *offset_p = 20;
3242 if (size_p)
3243 *size_p = 4;
3244}
3245
3246void
3247i386_emit_le_goto (int *offset_p, int *size_p)
3248{
3249 EMIT_ASM32 (le,
3250 "cmpl %ebx,4(%esp)\n\t"
3251 "jle .Lle_jump\n\t"
3252 "jne .Lle_fallthru\n\t"
3253 "cmpl %eax,(%esp)\n\t"
3254 "jnle .Lle_fallthru\n\t"
3255 ".Lle_jump:\n\t"
3256 "lea 0x8(%esp),%esp\n\t"
3257 "pop %eax\n\t"
3258 "pop %ebx\n\t"
3259 /* jmp, but don't trust the assembler to choose the right jump */
3260 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3261 ".Lle_fallthru:\n\t"
3262 "lea 0x8(%esp),%esp\n\t"
3263 "pop %eax\n\t"
3264 "pop %ebx");
3265
3266 if (offset_p)
3267 *offset_p = 20;
3268 if (size_p)
3269 *size_p = 4;
3270}
3271
3272void
3273i386_emit_gt_goto (int *offset_p, int *size_p)
3274{
3275 EMIT_ASM32 (gt,
3276 "cmpl %ebx,4(%esp)\n\t"
3277 "jg .Lgt_jump\n\t"
3278 "jne .Lgt_fallthru\n\t"
3279 "cmpl %eax,(%esp)\n\t"
3280 "jng .Lgt_fallthru\n\t"
3281 ".Lgt_jump:\n\t"
3282 "lea 0x8(%esp),%esp\n\t"
3283 "pop %eax\n\t"
3284 "pop %ebx\n\t"
3285 /* jmp, but don't trust the assembler to choose the right jump */
3286 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3287 ".Lgt_fallthru:\n\t"
3288 "lea 0x8(%esp),%esp\n\t"
3289 "pop %eax\n\t"
3290 "pop %ebx");
3291
3292 if (offset_p)
3293 *offset_p = 20;
3294 if (size_p)
3295 *size_p = 4;
3296}
3297
3298void
3299i386_emit_ge_goto (int *offset_p, int *size_p)
3300{
3301 EMIT_ASM32 (ge,
3302 "cmpl %ebx,4(%esp)\n\t"
3303 "jge .Lge_jump\n\t"
3304 "jne .Lge_fallthru\n\t"
3305 "cmpl %eax,(%esp)\n\t"
3306 "jnge .Lge_fallthru\n\t"
3307 ".Lge_jump:\n\t"
3308 "lea 0x8(%esp),%esp\n\t"
3309 "pop %eax\n\t"
3310 "pop %ebx\n\t"
3311 /* jmp, but don't trust the assembler to choose the right jump */
3312 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3313 ".Lge_fallthru:\n\t"
3314 "lea 0x8(%esp),%esp\n\t"
3315 "pop %eax\n\t"
3316 "pop %ebx");
3317
3318 if (offset_p)
3319 *offset_p = 20;
3320 if (size_p)
3321 *size_p = 4;
3322}
3323
6a271cae
PA
3324struct emit_ops i386_emit_ops =
3325 {
3326 i386_emit_prologue,
3327 i386_emit_epilogue,
3328 i386_emit_add,
3329 i386_emit_sub,
3330 i386_emit_mul,
3331 i386_emit_lsh,
3332 i386_emit_rsh_signed,
3333 i386_emit_rsh_unsigned,
3334 i386_emit_ext,
3335 i386_emit_log_not,
3336 i386_emit_bit_and,
3337 i386_emit_bit_or,
3338 i386_emit_bit_xor,
3339 i386_emit_bit_not,
3340 i386_emit_equal,
3341 i386_emit_less_signed,
3342 i386_emit_less_unsigned,
3343 i386_emit_ref,
3344 i386_emit_if_goto,
3345 i386_emit_goto,
3346 i386_write_goto_address,
3347 i386_emit_const,
3348 i386_emit_call,
3349 i386_emit_reg,
3350 i386_emit_pop,
3351 i386_emit_stack_flush,
3352 i386_emit_zero_ext,
3353 i386_emit_swap,
3354 i386_emit_stack_adjust,
3355 i386_emit_int_call_1,
6b9801d4
SS
3356 i386_emit_void_call_2,
3357 i386_emit_eq_goto,
3358 i386_emit_ne_goto,
3359 i386_emit_lt_goto,
3360 i386_emit_le_goto,
3361 i386_emit_gt_goto,
3362 i386_emit_ge_goto
6a271cae
PA
3363 };
3364
3365
3366static struct emit_ops *
3367x86_emit_ops (void)
3368{
3369#ifdef __x86_64__
3aee8918 3370 if (is_64bit_tdesc ())
6a271cae
PA
3371 return &amd64_emit_ops;
3372 else
3373#endif
3374 return &i386_emit_ops;
3375}
3376
/* Range stepping (vCont;r) is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3382
d0722149
DE
3383/* This is initialized assuming an amd64 target.
3384 x86_arch_setup will correct it for i386 or amd64 targets. */
3385
3386struct linux_target_ops the_low_target =
3387{
3388 x86_arch_setup,
3aee8918
PA
3389 x86_linux_regs_info,
3390 x86_cannot_fetch_register,
3391 x86_cannot_store_register,
c14dfd32 3392 NULL, /* fetch_register */
d0722149
DE
3393 x86_get_pc,
3394 x86_set_pc,
3395 x86_breakpoint,
3396 x86_breakpoint_len,
3397 NULL,
3398 1,
3399 x86_breakpoint_at,
aa5ca48f
DE
3400 x86_insert_point,
3401 x86_remove_point,
3402 x86_stopped_by_watchpoint,
3403 x86_stopped_data_address,
d0722149
DE
3404 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3405 native i386 case (no registers smaller than an xfer unit), and are not
3406 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3407 NULL,
3408 NULL,
3409 /* need to fix up i386 siginfo if host is amd64 */
3410 x86_siginfo_fixup,
aa5ca48f
DE
3411 x86_linux_new_process,
3412 x86_linux_new_thread,
1570b33e 3413 x86_linux_prepare_to_resume,
219f2f23 3414 x86_linux_process_qsupported,
fa593d66
PA
3415 x86_supports_tracepoints,
3416 x86_get_thread_area,
6a271cae 3417 x86_install_fast_tracepoint_jump_pad,
405f8e94
SS
3418 x86_emit_ops,
3419 x86_get_min_fast_tracepoint_insn_len,
c2d6af84 3420 x86_supports_range_stepping,
d0722149 3421};
3aee8918
PA
3422
3423void
3424initialize_low_arch (void)
3425{
3426 /* Initialize the Linux target descriptions. */
3427#ifdef __x86_64__
3428 init_registers_amd64_linux ();
3429 init_registers_amd64_avx_linux ();
01f9f808 3430 init_registers_amd64_avx512_linux ();
a196ebeb
WT
3431 init_registers_amd64_mpx_linux ();
3432
3aee8918 3433 init_registers_x32_linux ();
7e5aaa09 3434 init_registers_x32_avx_linux ();
01f9f808 3435 init_registers_x32_avx512_linux ();
3aee8918
PA
3436
3437 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3438 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3439 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3440#endif
3441 init_registers_i386_linux ();
3442 init_registers_i386_mmx_linux ();
3443 init_registers_i386_avx_linux ();
01f9f808 3444 init_registers_i386_avx512_linux ();
a196ebeb 3445 init_registers_i386_mpx_linux ();
3aee8918
PA
3446
3447 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3448 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3449 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3450
3451 initialize_regsets_info (&x86_regsets_info);
3452}
This page took 0.558215 seconds and 4 git commands to generate.