/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
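
/* Note (added for illustration, not part of the original source):
   jump_insn is a 5-byte "jmp rel32" (opcode 0xe9 plus a 32-bit
   displacement to be patched in later), and small_jump_insn is a
   4-byte "jmp rel16" (0xe9 with a 0x66 operand-size prefix plus a
   16-bit displacement), used below when only 4 bytes at the
   tracepoint address may be overwritten.  */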

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
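
/* For illustration (added, not part of the original source): each
   regmap entry is the byte offset of that register inside the ptrace
   `struct user' register block, so the fill/store routines below can
   copy GDB register I straight between the regcache and that slot,
   roughly:

     collect_register (regcache, i, (char *) buf + x86_64_regmap[i]);

   Entries of -1 mark GDB register numbers (the x87/SSE banks) that
   are not transferred through this regset.  */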

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    *(int *)base = desc[1];
    return PS_OK;
  }
}
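
/* Illustrative note (an assumption based on <asm/ldt.h>, not part of
   the original source): PTRACE_GET_THREAD_AREA fills in a GDT entry
   in `struct user_desc' form, whose second 32-bit word is the
   segment's base address; that is what desc[1] refers to above and
   in x86_get_thread_area below:

     struct user_desc
     {
       unsigned int entry_number;
       unsigned int base_addr;       <- desc[1]
       unsigned int limit;
       unsigned int flag_bitfields;
     };  */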

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* Shift count that turns the GS
				      selector into a GDT index (drops
				      the RPL and TI bits).  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
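
/* Note (added for clarity): the all-zero entry above terminates the
   list; the loops in this file that walk target_regsets stop at the
   first entry whose fill_function is NULL, and the -1 size likewise
   marks the end for size-based walkers.  */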

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved by other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from the XSAVE extended state, at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else
	    init_registers_amd64_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}

/* Process the qSupported query, "xmlRegisters=".  Update the buffer
   size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

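/* Example (illustrative, not part of the original source): a GDB
   built with XML support announces the architectures it has target
   descriptions for in a qSupported feature string such as
   "xmlRegisters=i386,arm,mips"; finding "i386" in that
   comma-separated list is what sets use_xml above.  */
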
/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* OK, we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
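
/* Usage sketch (illustrative, not part of the original source):
   push_opcode parses a string of hex byte pairs, appends the raw
   bytes to BUF, and returns how many bytes were written, so callers
   can assemble instruction sequences incrementally, e.g.:

     i += push_opcode (&buf[i], "48 89 e6");   (mov %rsp,%rsi)

   The string form keeps each byte sequence readable next to its
   assembly mnemonic in the jump-pad builders below.  */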

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* mov $<addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* mov <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* mov <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which leaves the original
     i386 behind.  If we cared about it, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (in_process_agent_loaded ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to the user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
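
/* For example (illustrative, not part of the original source),

     EMIT_ASM (amd64_pop, "pop %rax");

   expands to roughly:

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
	      "\t" "start_amd64_pop:" "\t" "pop %rax" "\n"
	      "\t" "end_amd64_pop:");

   i.e. the instruction bytes are assembled into gdbserver itself
   between the two labels, skipped over at run time by the jmp, and
   copied from there into the inferior by add_insns.  */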

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call.  Load the address into
	 a call-clobbered scratch register and call through it, so we
	 don't have to push/pop anything.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
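
/* Worked example (illustrative, not part of the original source):
   with the pad at address P and FN within +/-2GB, the near call is
   "e8" followed by the 32-bit little-endian value FN - (P + 5); the
   displacement is relative to the end of the 5-byte instruction,
   which is what the "+ 1 + 4" in the offset64 computation above
   accounts for.  */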

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

2031static void
2032amd64_emit_stack_adjust (int n)
2033{
2034 unsigned char buf[16];
2035 int i;
2036 CORE_ADDR buildaddr = current_insn_ptr;
2037
2038 i = 0;
2039 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2040 buf[i++] = 0x8d;
2041 buf[i++] = 0x64;
2042 buf[i++] = 0x24;
2043 /* This only handles adjustments up to 16, but we don't expect any more. */
2044 buf[i++] = n * 8;
2045 append_insns (&buildaddr, i, buf);
2046 current_insn_ptr = buildaddr;
2047}
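/* The bytes above decode as `leaq disp8(%rsp),%rsp' (48 8d 64 24 nn).
   The displacement is a signed 8-bit immediate, so N * 8 must stay
   within [-128, 127], i.e. at most 15 stack slots.  */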
2048
2049/* FN's prototype is `LONGEST(*fn)(int)'. */
2050
2051static void
2052amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2053{
2054 unsigned char buf[16];
2055 int i;
2056 CORE_ADDR buildaddr;
2057
2058 buildaddr = current_insn_ptr;
2059 i = 0;
2060 buf[i++] = 0xbf; /* movl $<n>,%edi */
2061 memcpy (&buf[i], &arg1, sizeof (arg1));
2062 i += 4;
2063 append_insns (&buildaddr, i, buf);
2064 current_insn_ptr = buildaddr;
2065 amd64_emit_call (fn);
2066}
2067
2068/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2069
2070static void
2071amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2072{
2073 unsigned char buf[16];
2074 int i;
2075 CORE_ADDR buildaddr;
2076
2077 buildaddr = current_insn_ptr;
2078 i = 0;
2079 buf[i++] = 0xbf; /* movl $<n>,%edi */
2080 memcpy (&buf[i], &arg1, sizeof (arg1));
2081 i += 4;
2082 append_insns (&buildaddr, i, buf);
2083 current_insn_ptr = buildaddr;
2084 EMIT_ASM (amd64_void_call_2_a,
2085 /* Save away a copy of the stack top. */
2086 "push %rax\n\t"
2087 /* Also pass top as the second argument. */
2088 "mov %rax,%rsi");
2089 amd64_emit_call (fn);
2090 EMIT_ASM (amd64_void_call_2_b,
2091 /* Restore the stack top, %rax may have been trashed. */
2092 "pop %rax");
2093}
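/* Per the SysV AMD64 convention this leaves ARG1 in %edi and the
   64-bit top of the value stack in %rsi, matching FN's
   `void (*fn) (int, LONGEST)' prototype; %rax is saved around the
   call because the callee may clobber it.  */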
2094
2095void
2096amd64_emit_eq_goto (int *offset_p, int *size_p)
2097{
2098 EMIT_ASM (amd64_eq,
2099 "cmp %rax,(%rsp)\n\t"
2100 "jne .Lamd64_eq_fallthru\n\t"
2101 "lea 0x8(%rsp),%rsp\n\t"
2102 "pop %rax\n\t"
2103 /* jmp, but don't trust the assembler to choose the right jump */
2104 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2105 ".Lamd64_eq_fallthru:\n\t"
2106 "lea 0x8(%rsp),%rsp\n\t"
2107 "pop %rax");
2108
2109 if (offset_p)
2110 *offset_p = 13;
2111 if (size_p)
2112 *size_p = 4;
2113}
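/* The constant 13 is the byte length of the code in front of the
   rel32 field, assuming GAS picks the short rel8 form for the
   conditional jump:

     48 39 04 24       cmp %rax,(%rsp)          4
     75 xx             jne .Lamd64_eq_fallthru  2
     48 8d 64 24 08    lea 0x8(%rsp),%rsp       5
     58                pop %rax                 1
     e9                jmp opcode               1   total: 13

   The same layout holds for the ne/lt/le/gt/ge variants below.  */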
2114
2115void
2116amd64_emit_ne_goto (int *offset_p, int *size_p)
2117{
2118 EMIT_ASM (amd64_ne,
2119 "cmp %rax,(%rsp)\n\t"
2120 "je .Lamd64_ne_fallthru\n\t"
2121 "lea 0x8(%rsp),%rsp\n\t"
2122 "pop %rax\n\t"
2123 /* jmp, but don't trust the assembler to choose the right jump */
2124 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2125 ".Lamd64_ne_fallthru:\n\t"
2126 "lea 0x8(%rsp),%rsp\n\t"
2127 "pop %rax");
2128
2129 if (offset_p)
2130 *offset_p = 13;
2131 if (size_p)
2132 *size_p = 4;
2133}
2134
2135void
2136amd64_emit_lt_goto (int *offset_p, int *size_p)
2137{
2138 EMIT_ASM (amd64_lt,
2139 "cmp %rax,(%rsp)\n\t"
2140 "jnl .Lamd64_lt_fallthru\n\t"
2141 "lea 0x8(%rsp),%rsp\n\t"
2142 "pop %rax\n\t"
2143 /* jmp, but don't trust the assembler to choose the right jump */
2144 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2145 ".Lamd64_lt_fallthru:\n\t"
2146 "lea 0x8(%rsp),%rsp\n\t"
2147 "pop %rax");
2148
2149 if (offset_p)
2150 *offset_p = 13;
2151 if (size_p)
2152 *size_p = 4;
2153}
2154
2155void
2156amd64_emit_le_goto (int *offset_p, int *size_p)
2157{
2158 EMIT_ASM (amd64_le,
2159 "cmp %rax,(%rsp)\n\t"
2160 "jnle .Lamd64_le_fallthru\n\t"
2161 "lea 0x8(%rsp),%rsp\n\t"
2162 "pop %rax\n\t"
2163 /* jmp, but don't trust the assembler to choose the right jump */
2164 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2165 ".Lamd64_le_fallthru:\n\t"
2166 "lea 0x8(%rsp),%rsp\n\t"
2167 "pop %rax");
2168
2169 if (offset_p)
2170 *offset_p = 13;
2171 if (size_p)
2172 *size_p = 4;
2173}
2174
2175void
2176amd64_emit_gt_goto (int *offset_p, int *size_p)
2177{
2178 EMIT_ASM (amd64_gt,
2179 "cmp %rax,(%rsp)\n\t"
2180 "jng .Lamd64_gt_fallthru\n\t"
2181 "lea 0x8(%rsp),%rsp\n\t"
2182 "pop %rax\n\t"
2183 /* jmp, but don't trust the assembler to choose the right jump */
2184 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2185 ".Lamd64_gt_fallthru:\n\t"
2186 "lea 0x8(%rsp),%rsp\n\t"
2187 "pop %rax");
2188
2189 if (offset_p)
2190 *offset_p = 13;
2191 if (size_p)
2192 *size_p = 4;
2193}
2194
2195void
2196amd64_emit_ge_goto (int *offset_p, int *size_p)
2197{
2198 EMIT_ASM (amd64_ge,
2199 "cmp %rax,(%rsp)\n\t"
2200 "jnge .Lamd64_ge_fallthru\n\t"
2201 ".Lamd64_ge_jump:\n\t"
2202 "lea 0x8(%rsp),%rsp\n\t"
2203 "pop %rax\n\t"
2204 /* jmp, but don't trust the assembler to choose the right jump */
2205 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2206 ".Lamd64_ge_fallthru:\n\t"
2207 "lea 0x8(%rsp),%rsp\n\t"
2208 "pop %rax");
2209
2210 if (offset_p)
2211 *offset_p = 13;
2212 if (size_p)
2213 *size_p = 4;
2214}
2215
2216struct emit_ops amd64_emit_ops =
2217 {
2218 amd64_emit_prologue,
2219 amd64_emit_epilogue,
2220 amd64_emit_add,
2221 amd64_emit_sub,
2222 amd64_emit_mul,
2223 amd64_emit_lsh,
2224 amd64_emit_rsh_signed,
2225 amd64_emit_rsh_unsigned,
2226 amd64_emit_ext,
2227 amd64_emit_log_not,
2228 amd64_emit_bit_and,
2229 amd64_emit_bit_or,
2230 amd64_emit_bit_xor,
2231 amd64_emit_bit_not,
2232 amd64_emit_equal,
2233 amd64_emit_less_signed,
2234 amd64_emit_less_unsigned,
2235 amd64_emit_ref,
2236 amd64_emit_if_goto,
2237 amd64_emit_goto,
2238 amd64_write_goto_address,
2239 amd64_emit_const,
2240 amd64_emit_call,
2241 amd64_emit_reg,
2242 amd64_emit_pop,
2243 amd64_emit_stack_flush,
2244 amd64_emit_zero_ext,
2245 amd64_emit_swap,
2246 amd64_emit_stack_adjust,
2247 amd64_emit_int_call_1,
2248 amd64_emit_void_call_2,
2249 amd64_emit_eq_goto,
2250 amd64_emit_ne_goto,
2251 amd64_emit_lt_goto,
2252 amd64_emit_le_goto,
2253 amd64_emit_gt_goto,
2254 amd64_emit_ge_goto
2255 };
2256
2257#endif /* __x86_64__ */
2258
2259static void
2260i386_emit_prologue (void)
2261{
2262 EMIT_ASM32 (i386_prologue,
2263 "push %ebp\n\t"
2264 "mov %esp,%ebp\n\t"
2265 "push %ebx");
2266 /* At this point, the raw regs base address is at 8(%ebp), and the
2267 value pointer is at 12(%ebp). */
2268}
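/* In this 32-bit emitter a 64-bit value is kept split across a
   register pair: low word in %eax, high word in %ebx.  Each deeper
   stack entry takes two 4-byte slots, low word at the lower address.
   The add/adc and sub/sbb pairs below depend on that layout.  */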
2269
2270static void
2271i386_emit_epilogue (void)
2272{
2273 EMIT_ASM32 (i386_epilogue,
2274 "mov 12(%ebp),%ecx\n\t"
2275 "mov %eax,(%ecx)\n\t"
2276 "mov %ebx,0x4(%ecx)\n\t"
2277 "xor %eax,%eax\n\t"
2278 "pop %ebx\n\t"
2279 "pop %ebp\n\t"
2280 "ret");
2281}
2282
2283static void
2284i386_emit_add (void)
2285{
2286 EMIT_ASM32 (i386_add,
2287 "add (%esp),%eax\n\t"
2288 "adc 0x4(%esp),%ebx\n\t"
2289 "lea 0x8(%esp),%esp");
2290}
2291
2292static void
2293i386_emit_sub (void)
2294{
2295 EMIT_ASM32 (i386_sub,
2296 "subl %eax,(%esp)\n\t"
2297 "sbbl %ebx,4(%esp)\n\t"
2298 "pop %eax\n\t"
2299 "pop %ebx\n\t");
2300}
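/* Note the operand direction: subl/sbbl subtract the old top (in
   %ebx:%eax) from the next-on-stack entry in memory, and the two pops
   then load that result back into %ebx:%eax.  */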
2301
2302static void
2303i386_emit_mul (void)
2304{
2305 emit_error = 1;
2306}
2307
2308static void
2309i386_emit_lsh (void)
2310{
2311 emit_error = 1;
2312}
2313
2314static void
2315i386_emit_rsh_signed (void)
2316{
2317 emit_error = 1;
2318}
2319
2320static void
2321i386_emit_rsh_unsigned (void)
2322{
2323 emit_error = 1;
2324}
2325
2326static void
2327i386_emit_ext (int arg)
2328{
2329 switch (arg)
2330 {
2331 case 8:
2332 EMIT_ASM32 (i386_ext_8,
2333 "cbtw\n\t"
2334 "cwtl\n\t"
2335 "movl %eax,%ebx\n\t"
2336 "sarl $31,%ebx");
2337 break;
2338 case 16:
2339 EMIT_ASM32 (i386_ext_16,
2340 "cwtl\n\t"
2341 "movl %eax,%ebx\n\t"
2342 "sarl $31,%ebx");
2343 break;
2344 case 32:
2345 EMIT_ASM32 (i386_ext_32,
2346 "movl %eax,%ebx\n\t"
2347 "sarl $31,%ebx");
2348 break;
2349 default:
2350 emit_error = 1;
2351 }
2352}
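/* Each case sign-extends %eax in place (cbtw: %al to %ax, cwtl: %ax
   to %eax) and then fills %ebx with copies of the sign bit via the
   arithmetic shift, producing the 64-bit %ebx:%eax form.  */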
2353
2354static void
2355i386_emit_log_not (void)
2356{
2357 EMIT_ASM32 (i386_log_not,
2358 "or %ebx,%eax\n\t"
2359 "test %eax,%eax\n\t"
2360 "sete %cl\n\t"
2361 "xor %ebx,%ebx\n\t"
2362 "movzbl %cl,%eax");
2363}
2364
2365static void
2366i386_emit_bit_and (void)
2367{
2368 EMIT_ASM32 (i386_and,
2369 "and (%esp),%eax\n\t"
2370 "and 0x4(%esp),%ebx\n\t"
2371 "lea 0x8(%esp),%esp");
2372}
2373
2374static void
2375i386_emit_bit_or (void)
2376{
2377 EMIT_ASM32 (i386_or,
2378 "or (%esp),%eax\n\t"
2379 "or 0x4(%esp),%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2381}
2382
2383static void
2384i386_emit_bit_xor (void)
2385{
2386 EMIT_ASM32 (i386_xor,
2387 "xor (%esp),%eax\n\t"
2388 "xor 0x4(%esp),%ebx\n\t"
2389 "lea 0x8(%esp),%esp");
2390}
2391
2392static void
2393i386_emit_bit_not (void)
2394{
2395 EMIT_ASM32 (i386_bit_not,
2396 "xor $0xffffffff,%eax\n\t"
2397 "xor $0xffffffff,%ebx\n\t");
2398}
2399
2400static void
2401i386_emit_equal (void)
2402{
2403 EMIT_ASM32 (i386_equal,
2404 "cmpl %ebx,4(%esp)\n\t"
2405 "jne .Li386_equal_false\n\t"
2406 "cmpl %eax,(%esp)\n\t"
2407 "je .Li386_equal_true\n\t"
2408 ".Li386_equal_false:\n\t"
2409 "xor %eax,%eax\n\t"
2410 "jmp .Li386_equal_end\n\t"
2411 ".Li386_equal_true:\n\t"
2412 "mov $1,%eax\n\t"
2413 ".Li386_equal_end:\n\t"
2414 "xor %ebx,%ebx\n\t"
2415 "lea 0x8(%esp),%esp");
2416}
2417
2418static void
2419i386_emit_less_signed (void)
2420{
2421 EMIT_ASM32 (i386_less_signed,
2422 "cmpl %ebx,4(%esp)\n\t"
2423 "jl .Li386_less_signed_true\n\t"
2424 "jne .Li386_less_signed_false\n\t"
2425 "cmpl %eax,(%esp)\n\t"
2426 "jl .Li386_less_signed_true\n\t"
2427 ".Li386_less_signed_false:\n\t"
2428 "xor %eax,%eax\n\t"
2429 "jmp .Li386_less_signed_end\n\t"
2430 ".Li386_less_signed_true:\n\t"
2431 "mov $1,%eax\n\t"
2432 ".Li386_less_signed_end:\n\t"
2433 "xor %ebx,%ebx\n\t"
2434 "lea 0x8(%esp),%esp");
2435}
2436
2437static void
2438i386_emit_less_unsigned (void)
2439{
2440 EMIT_ASM32 (i386_less_unsigned,
2441 "cmpl %ebx,4(%esp)\n\t"
2442 "jb .Li386_less_unsigned_true\n\t"
2443 "jne .Li386_less_unsigned_false\n\t"
2444 "cmpl %eax,(%esp)\n\t"
2445 "jb .Li386_less_unsigned_true\n\t"
2446 ".Li386_less_unsigned_false:\n\t"
2447 "xor %eax,%eax\n\t"
2448 "jmp .Li386_less_unsigned_end\n\t"
2449 ".Li386_less_unsigned_true:\n\t"
2450 "mov $1,%eax\n\t"
2451 ".Li386_less_unsigned_end:\n\t"
2452 "xor %ebx,%ebx\n\t"
2453 "lea 0x8(%esp),%esp");
2454}
2455
2456static void
2457i386_emit_ref (int size)
2458{
2459 switch (size)
2460 {
2461 case 1:
2462 EMIT_ASM32 (i386_ref1,
2463 "movb (%eax),%al");
2464 break;
2465 case 2:
2466 EMIT_ASM32 (i386_ref2,
2467 "movw (%eax),%ax");
2468 break;
2469 case 4:
2470 EMIT_ASM32 (i386_ref4,
2471 "movl (%eax),%eax");
2472 break;
2473 case 8:
2474 EMIT_ASM32 (i386_ref8,
2475 "movl 4(%eax),%ebx\n\t"
2476 "movl (%eax),%eax");
2477 break;
2478 }
2479}
2480
2481static void
2482i386_emit_if_goto (int *offset_p, int *size_p)
2483{
2484 EMIT_ASM32 (i386_if_goto,
2485 "mov %eax,%ecx\n\t"
2486 "or %ebx,%ecx\n\t"
2487 "pop %eax\n\t"
2488 "pop %ebx\n\t"
2489 "cmpl $0,%ecx\n\t"
2490 /* Don't trust the assembler to choose the right jump */
2491 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2492
2493 if (offset_p)
2494 *offset_p = 11; /* be sure that this matches the sequence above */
2495 if (size_p)
2496 *size_p = 4;
2497}
2498
2499static void
2500i386_emit_goto (int *offset_p, int *size_p)
2501{
2502 EMIT_ASM32 (i386_goto,
2503 /* Don't trust the assembler to choose the right jump */
2504 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2505 if (offset_p)
2506 *offset_p = 1;
2507 if (size_p)
2508 *size_p = 4;
2509}
2510
2511static void
2512i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2513{
2514 int diff = (to - (from + size));
2515 unsigned char buf[sizeof (int)];
2516
2517 /* We're only doing 4-byte sizes at the moment. */
2518 if (size != 4)
2519 {
2520 emit_error = 1;
2521 return;
2522 }
2523
2524 memcpy (buf, &diff, sizeof (int));
2525 write_inferior_memory (from, buf, sizeof (int));
2526}
2527
2528static void
2529i386_emit_const (LONGEST num)
2530{
2531 unsigned char buf[16];
2532 int i, hi, lo;
2533 CORE_ADDR buildaddr = current_insn_ptr;
2534
2535 i = 0;
2536 buf[i++] = 0xb8; /* mov $<n>,%eax */
2537 lo = num & 0xffffffff;
2538 memcpy (&buf[i], &lo, sizeof (lo));
2539 i += 4;
2540 hi = ((num >> 32) & 0xffffffff);
2541 if (hi)
2542 {
2543 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2544 memcpy (&buf[i], &hi, sizeof (hi));
2545 i += 4;
2546 }
2547 else
2548 {
2549 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2550 }
2551 append_insns (&buildaddr, i, buf);
2552 current_insn_ptr = buildaddr;
2553}
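/* The emitted bytes are `b8 <imm32>' (mov into %eax) for the low
   word, then either `bb <imm32>' (mov into %ebx) for a nonzero high
   word, or the shorter two-byte `31 db' (xor %ebx,%ebx) when the
   constant fits in 32 bits.  */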
2554
2555static void
2556i386_emit_call (CORE_ADDR fn)
2557{
2558 unsigned char buf[16];
2559 int i, offset;
2560 CORE_ADDR buildaddr;
2561
2562 buildaddr = current_insn_ptr;
2563 i = 0;
2564 buf[i++] = 0xe8; /* call <reladdr> */
2565 offset = ((int) fn) - (buildaddr + 5);
2566 memcpy (buf + 1, &offset, 4);
2567 append_insns (&buildaddr, 5, buf);
2568 current_insn_ptr = buildaddr;
2569}
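/* `e8 <rel32>' is a near relative call whose displacement counts from
   the end of the 5-byte instruction, hence `fn - (buildaddr + 5)'
   above.  */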
2570
2571static void
2572i386_emit_reg (int reg)
2573{
2574 unsigned char buf[16];
2575 int i;
2576 CORE_ADDR buildaddr;
2577
2578 EMIT_ASM32 (i386_reg_a,
2579 "sub $0x8,%esp");
2580 buildaddr = current_insn_ptr;
2581 i = 0;
2582 buf[i++] = 0xb8; /* mov $<n>,%eax */
2583 memcpy (&buf[i], &reg, sizeof (reg));
2584 i += 4;
2585 append_insns (&buildaddr, i, buf);
2586 current_insn_ptr = buildaddr;
2587 EMIT_ASM32 (i386_reg_b,
2588 "mov %eax,4(%esp)\n\t"
2589 "mov 8(%ebp),%eax\n\t"
2590 "mov %eax,(%esp)");
2591 i386_emit_call (get_raw_reg_func_addr ());
2592 EMIT_ASM32 (i386_reg_c,
2593 "xor %ebx,%ebx\n\t"
2594 "lea 0x8(%esp),%esp");
2595}
2596
2597static void
2598i386_emit_pop (void)
2599{
2600 EMIT_ASM32 (i386_pop,
2601 "pop %eax\n\t"
2602 "pop %ebx");
2603}
2604
2605static void
2606i386_emit_stack_flush (void)
2607{
2608 EMIT_ASM32 (i386_stack_flush,
2609 "push %ebx\n\t"
2610 "push %eax");
2611}
2612
2613static void
2614i386_emit_zero_ext (int arg)
2615{
2616 switch (arg)
2617 {
2618 case 8:
2619 EMIT_ASM32 (i386_zero_ext_8,
2620 "and $0xff,%eax\n\t"
2621 "xor %ebx,%ebx");
2622 break;
2623 case 16:
2624 EMIT_ASM32 (i386_zero_ext_16,
2625 "and $0xffff,%eax\n\t"
2626 "xor %ebx,%ebx");
2627 break;
2628 case 32:
2629 EMIT_ASM32 (i386_zero_ext_32,
2630 "xor %ebx,%ebx");
2631 break;
2632 default:
2633 emit_error = 1;
2634 }
2635}
2636
2637static void
2638i386_emit_swap (void)
2639{
2640 EMIT_ASM32 (i386_swap,
2641 "mov %eax,%ecx\n\t"
2642 "mov %ebx,%edx\n\t"
2643 "pop %eax\n\t"
2644 "pop %ebx\n\t"
2645 "push %edx\n\t"
2646 "push %ecx");
2647}
2648
2649static void
2650i386_emit_stack_adjust (int n)
2651{
2652 unsigned char buf[16];
2653 int i;
2654 CORE_ADDR buildaddr = current_insn_ptr;
2655
2656 i = 0;
2657 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2658 buf[i++] = 0x64;
2659 buf[i++] = 0x24;
2660 buf[i++] = n * 8;
2661 append_insns (&buildaddr, i, buf);
2662 current_insn_ptr = buildaddr;
2663}
2664
2665/* FN's prototype is `LONGEST(*fn)(int)'. */
2666
2667static void
2668i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2669{
2670 unsigned char buf[16];
2671 int i;
2672 CORE_ADDR buildaddr;
2673
2674 EMIT_ASM32 (i386_int_call_1_a,
2675 /* Reserve a bit of stack space. */
2676 "sub $0x8,%esp");
2677 /* Put the one argument on the stack. */
2678 buildaddr = current_insn_ptr;
2679 i = 0;
2680 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2681 buf[i++] = 0x04;
2682 buf[i++] = 0x24;
2683 memcpy (&buf[i], &arg1, sizeof (arg1));
2684 i += 4;
2685 append_insns (&buildaddr, i, buf);
2686 current_insn_ptr = buildaddr;
2687 i386_emit_call (fn);
2688 EMIT_ASM32 (i386_int_call_1_c,
2689 "mov %edx,%ebx\n\t"
2690 "lea 0x8(%esp),%esp");
2691}
2692
2693/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2694
2695static void
2696i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2697{
2698 unsigned char buf[16];
2699 int i;
2700 CORE_ADDR buildaddr;
2701
2702 EMIT_ASM32 (i386_void_call_2_a,
2703 /* Preserve %eax only; we don't have to worry about %ebx. */
2704 "push %eax\n\t"
2705 /* Reserve a bit of stack space for arguments. */
2706 "sub $0x10,%esp\n\t"
2707 /* Copy "top" to the second argument position. (Note that
2708 we can't assume the called function won't scribble on its
2709 arguments, so don't try to restore from this.) */
2710 "mov %eax,4(%esp)\n\t"
2711 "mov %ebx,8(%esp)");
2712 /* Put the first argument on the stack. */
2713 buildaddr = current_insn_ptr;
2714 i = 0;
2715 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2716 buf[i++] = 0x04;
2717 buf[i++] = 0x24;
2718 memcpy (&buf[i], &arg1, sizeof (arg1));
2719 i += 4;
2720 append_insns (&buildaddr, i, buf);
2721 current_insn_ptr = buildaddr;
2722 i386_emit_call (fn);
2723 EMIT_ASM32 (i386_void_call_2_b,
2724 "lea 0x10(%esp),%esp\n\t"
2725 /* Restore original stack top. */
2726 "pop %eax");
2727}
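/* Under the cdecl convention used here the callee finds ARG1 at
   (%esp) and the 64-bit second argument split across 4(%esp) and
   8(%esp), matching FN's `void (*fn) (int, LONGEST)' prototype.  */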
2728
2729
2730void
2731i386_emit_eq_goto (int *offset_p, int *size_p)
2732{
2733 EMIT_ASM32 (eq,
2734 /* Check low half first, more likely to be decider */
2735 "cmpl %eax,(%esp)\n\t"
2736 "jne .Leq_fallthru\n\t"
2737 "cmpl %ebx,4(%esp)\n\t"
2738 "jne .Leq_fallthru\n\t"
2739 "lea 0x8(%esp),%esp\n\t"
2740 "pop %eax\n\t"
2741 "pop %ebx\n\t"
2742 /* jmp, but don't trust the assembler to choose the right jump */
2743 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2744 ".Leq_fallthru:\n\t"
2745 "lea 0x8(%esp),%esp\n\t"
2746 "pop %eax\n\t"
2747 "pop %ebx");
2748
2749 if (offset_p)
2750 *offset_p = 18;
2751 if (size_p)
2752 *size_p = 4;
2753}
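/* As on amd64, the offset constant counts the bytes ahead of the
   rel32 field, assuming rel8 conditional jumps: 39 04 24 (3) + jne
   (2) + 39 5c 24 04 (4) + jne (2) + 8d 64 24 08 (4) + pop, pop (2) +
   e9 (1) = 18.  The lt/le/gt/ge variants below need a third
   conditional jump, giving 20.  */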
2754
2755void
2756i386_emit_ne_goto (int *offset_p, int *size_p)
2757{
2758 EMIT_ASM32 (ne,
2759 /* Check low half first, more likely to be decider */
2760 "cmpl %eax,(%esp)\n\t"
2761 "jne .Lne_jump\n\t"
2762 "cmpl %ebx,4(%esp)\n\t"
2763 "je .Lne_fallthru\n\t"
2764 ".Lne_jump:\n\t"
2765 "lea 0x8(%esp),%esp\n\t"
2766 "pop %eax\n\t"
2767 "pop %ebx\n\t"
2768 /* jmp, but don't trust the assembler to choose the right jump */
2769 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2770 ".Lne_fallthru:\n\t"
2771 "lea 0x8(%esp),%esp\n\t"
2772 "pop %eax\n\t"
2773 "pop %ebx");
2774
2775 if (offset_p)
2776 *offset_p = 18;
2777 if (size_p)
2778 *size_p = 4;
2779}
2780
2781void
2782i386_emit_lt_goto (int *offset_p, int *size_p)
2783{
2784 EMIT_ASM32 (lt,
2785 "cmpl %ebx,4(%esp)\n\t"
2786 "jl .Llt_jump\n\t"
2787 "jne .Llt_fallthru\n\t"
2788 "cmpl %eax,(%esp)\n\t"
2789 "jnl .Llt_fallthru\n\t"
2790 ".Llt_jump:\n\t"
2791 "lea 0x8(%esp),%esp\n\t"
2792 "pop %eax\n\t"
2793 "pop %ebx\n\t"
2794 /* jmp, but don't trust the assembler to choose the right jump */
2795 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2796 ".Llt_fallthru:\n\t"
2797 "lea 0x8(%esp),%esp\n\t"
2798 "pop %eax\n\t"
2799 "pop %ebx");
2800
2801 if (offset_p)
2802 *offset_p = 20;
2803 if (size_p)
2804 *size_p = 4;
2805}
2806
2807void
2808i386_emit_le_goto (int *offset_p, int *size_p)
2809{
2810 EMIT_ASM32 (le,
2811 "cmpl %ebx,4(%esp)\n\t"
2812 "jle .Lle_jump\n\t"
2813 "jne .Lle_fallthru\n\t"
2814 "cmpl %eax,(%esp)\n\t"
2815 "jnle .Lle_fallthru\n\t"
2816 ".Lle_jump:\n\t"
2817 "lea 0x8(%esp),%esp\n\t"
2818 "pop %eax\n\t"
2819 "pop %ebx\n\t"
2820 /* jmp, but don't trust the assembler to choose the right jump */
2821 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2822 ".Lle_fallthru:\n\t"
2823 "lea 0x8(%esp),%esp\n\t"
2824 "pop %eax\n\t"
2825 "pop %ebx");
2826
2827 if (offset_p)
2828 *offset_p = 20;
2829 if (size_p)
2830 *size_p = 4;
2831}
2832
2833void
2834i386_emit_gt_goto (int *offset_p, int *size_p)
2835{
2836 EMIT_ASM32 (gt,
2837 "cmpl %ebx,4(%esp)\n\t"
2838 "jg .Lgt_jump\n\t"
2839 "jne .Lgt_fallthru\n\t"
2840 "cmpl %eax,(%esp)\n\t"
2841 "jng .Lgt_fallthru\n\t"
2842 ".Lgt_jump:\n\t"
2843 "lea 0x8(%esp),%esp\n\t"
2844 "pop %eax\n\t"
2845 "pop %ebx\n\t"
2846 /* jmp, but don't trust the assembler to choose the right jump */
2847 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2848 ".Lgt_fallthru:\n\t"
2849 "lea 0x8(%esp),%esp\n\t"
2850 "pop %eax\n\t"
2851 "pop %ebx");
2852
2853 if (offset_p)
2854 *offset_p = 20;
2855 if (size_p)
2856 *size_p = 4;
2857}
2858
2859void
2860i386_emit_ge_goto (int *offset_p, int *size_p)
2861{
2862 EMIT_ASM32 (ge,
2863 "cmpl %ebx,4(%esp)\n\t"
2864 "jge .Lge_jump\n\t"
2865 "jne .Lge_fallthru\n\t"
2866 "cmpl %eax,(%esp)\n\t"
2867 "jnge .Lge_fallthru\n\t"
2868 ".Lge_jump:\n\t"
2869 "lea 0x8(%esp),%esp\n\t"
2870 "pop %eax\n\t"
2871 "pop %ebx\n\t"
2872 /* jmp, but don't trust the assembler to choose the right jump */
2873 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2874 ".Lge_fallthru:\n\t"
2875 "lea 0x8(%esp),%esp\n\t"
2876 "pop %eax\n\t"
2877 "pop %ebx");
2878
2879 if (offset_p)
2880 *offset_p = 20;
2881 if (size_p)
2882 *size_p = 4;
2883}
2884
2885struct emit_ops i386_emit_ops =
2886 {
2887 i386_emit_prologue,
2888 i386_emit_epilogue,
2889 i386_emit_add,
2890 i386_emit_sub,
2891 i386_emit_mul,
2892 i386_emit_lsh,
2893 i386_emit_rsh_signed,
2894 i386_emit_rsh_unsigned,
2895 i386_emit_ext,
2896 i386_emit_log_not,
2897 i386_emit_bit_and,
2898 i386_emit_bit_or,
2899 i386_emit_bit_xor,
2900 i386_emit_bit_not,
2901 i386_emit_equal,
2902 i386_emit_less_signed,
2903 i386_emit_less_unsigned,
2904 i386_emit_ref,
2905 i386_emit_if_goto,
2906 i386_emit_goto,
2907 i386_write_goto_address,
2908 i386_emit_const,
2909 i386_emit_call,
2910 i386_emit_reg,
2911 i386_emit_pop,
2912 i386_emit_stack_flush,
2913 i386_emit_zero_ext,
2914 i386_emit_swap,
2915 i386_emit_stack_adjust,
2916 i386_emit_int_call_1,
2917 i386_emit_void_call_2,
2918 i386_emit_eq_goto,
2919 i386_emit_ne_goto,
2920 i386_emit_lt_goto,
2921 i386_emit_le_goto,
2922 i386_emit_gt_goto,
2923 i386_emit_ge_goto
2924 };
2925
2926
2927static struct emit_ops *
2928x86_emit_ops (void)
2929{
2930#ifdef __x86_64__
2931 int use_64bit = register_size (0) == 8;
2932
2933 if (use_64bit)
2934 return &amd64_emit_ops;
2935 else
2936#endif
2937 return &i386_emit_ops;
2938}
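/* This presumably works because register 0 (%rax vs %eax) is 8 bytes
   wide exactly under the 64-bit tdescs selected by x86_arch_setup.  */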
2939
2940/* This is initialized assuming an amd64 target.
2941 x86_arch_setup will correct it for i386 or amd64 targets. */
2942
2943struct linux_target_ops the_low_target =
2944{
2945 x86_arch_setup,
2946 -1,
2947 NULL,
2948 NULL,
2949 NULL,
2950 x86_get_pc,
2951 x86_set_pc,
2952 x86_breakpoint,
2953 x86_breakpoint_len,
2954 NULL,
2955 1,
2956 x86_breakpoint_at,
2957 x86_insert_point,
2958 x86_remove_point,
2959 x86_stopped_by_watchpoint,
2960 x86_stopped_data_address,
2961 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2962 native i386 case (no registers smaller than an xfer unit), and are not
2963 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2964 NULL,
2965 NULL,
2966 /* need to fix up i386 siginfo if host is amd64 */
2967 x86_siginfo_fixup,
2968 x86_linux_new_process,
2969 x86_linux_new_thread,
2970 x86_linux_prepare_to_resume,
2971 x86_linux_process_qsupported,
2972 x86_supports_tracepoints,
2973 x86_get_thread_area,
2974 x86_install_fast_tracepoint_jump_pad,
2975 x86_emit_ops,
2976 x86_get_min_fast_tracepoint_insn_len,
2977};