1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
34 /* Defined in auto-generated file i386-linux.c. */
35 void init_registers_i386_linux (void);
36 /* Defined in auto-generated file amd64-linux.c. */
37 void init_registers_amd64_linux (void);
38 /* Defined in auto-generated file i386-avx-linux.c. */
39 void init_registers_i386_avx_linux (void);
40 /* Defined in auto-generated file amd64-avx-linux.c. */
41 void init_registers_amd64_avx_linux (void);
42 /* Defined in auto-generated file i386-mmx-linux.c. */
43 void init_registers_i386_mmx_linux (void);
/* Instruction templates used when building fast-tracepoint jump pads.
   jump_insn is a 5-byte rel32 jump (0xe9); small_jump_insn is a 4-byte
   rel16 jump (0x66 0xe9).  The displacement bytes are patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
48 /* Backward compatibility for gdb without XML support. */
/* Canned target descriptions returned to GDBs that lack XML support.
   Each is a single-line XML document (note the line continuations).  */
static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
63 #include <sys/procfs.h>
64 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests and arch_prctl codes that
   older kernel headers may not provide.  Each guard restores the value
   used by current kernels.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
94 /* Per-process arch-specific data we want to keep. */
96 struct arch_process_info
98 struct i386_debug_reg_state debug_reg_state
;
101 /* Per-thread arch-specific data we want to keep. */
105 /* Non-zero if our copy differs from what's recorded in the thread. */
106 int debug_registers_changed
;
111 /* Mapping between the general-purpose registers in `struct user'
112 format and GDB's register array layout.
113 Note that the transfer layout uses 64-bit regs. */
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  Each entry is a byte offset
   into the 64-bit `struct user_regs_struct'; the transfer layout uses
   64-bit slots even for a 32-bit inferior.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
124 /* So code below doesn't have to care, i386 or amd64. */
125 #define ORIG_EAX ORIG_RAX
127 static const int x86_64_regmap
[] =
129 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
130 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
131 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
132 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
133 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
134 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1,
137 -1, -1, -1, -1, -1, -1, -1, -1,
138 -1, -1, -1, -1, -1, -1, -1, -1, -1,
142 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
144 #else /* ! __x86_64__ */
146 /* Mapping between the general-purpose registers in `struct user'
147 format and GDB's register array layout. */
148 static /*const*/ int i386_regmap
[] =
150 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
151 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
152 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
153 DS
* 4, ES
* 4, FS
* 4, GS
* 4
156 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
160 /* Called by libthread_db. */
163 ps_get_thread_area (const struct ps_prochandle
*ph
,
164 lwpid_t lwpid
, int idx
, void **base
)
167 int use_64bit
= register_size (0) == 8;
174 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
178 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
189 unsigned int desc
[4];
191 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
192 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
195 *(int *)base
= desc
[1];
200 /* Get the thread area address. This is used to recognize which
201 thread is which when tracing with the in-process agent library. We
202 don't read anything from the address, and treat it as opaque; it's
203 the address itself that we assume is unique per-thread. */
206 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
209 int use_64bit
= register_size (0) == 8;
214 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
216 *addr
= (CORE_ADDR
) (uintptr_t) base
;
225 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
226 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
227 unsigned int desc
[4];
229 const int reg_thread_area
= 3; /* bits to scale down register value. */
232 collect_register_by_name (regcache
, "gs", &gs
);
234 idx
= gs
>> reg_thread_area
;
236 if (ptrace (PTRACE_GET_THREAD_AREA
,
238 (void *) (long) idx
, (unsigned long) &desc
) < 0)
249 i386_cannot_store_register (int regno
)
251 return regno
>= I386_NUM_REGS
;
255 i386_cannot_fetch_register (int regno
)
257 return regno
>= I386_NUM_REGS
;
261 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
266 if (register_size (0) == 8)
268 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
269 if (x86_64_regmap
[i
] != -1)
270 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
275 for (i
= 0; i
< I386_NUM_REGS
; i
++)
276 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
278 collect_register_by_name (regcache
, "orig_eax",
279 ((char *) buf
) + ORIG_EAX
* 4);
283 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
288 if (register_size (0) == 8)
290 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
291 if (x86_64_regmap
[i
] != -1)
292 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
297 for (i
= 0; i
< I386_NUM_REGS
; i
++)
298 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
300 supply_register_by_name (regcache
, "orig_eax",
301 ((char *) buf
) + ORIG_EAX
* 4);
/* Fill BUF with the inferior's floating-point state from REGCACHE:
   fxsave layout on amd64, legacy fsave layout on i386.  */
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Store the floating-point state in BUF back into REGCACHE:
   fxsave layout on amd64, legacy fsave layout on i386.  */
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Fill BUF (fxsave layout) from REGCACHE; i386-only regset used with
   PTRACE_GETFPXREGS.  */
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Store BUF (fxsave layout) into REGCACHE; i386-only regset used with
   PTRACE_SETFPXREGS.  */
static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Fill BUF (xsave layout) from REGCACHE, for PTRACE_SETREGSET with
   NT_X86_XSTATE.  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Store BUF (xsave layout) into REGCACHE, for PTRACE_GETREGSET with
   NT_X86_XSTATE.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
352 /* ??? The non-biarch i386 case stores all the i387 regs twice.
353 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
354 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
355 doesn't work. IWBN to avoid the duplication in the case where it
356 does work. Maybe the arch_setup routine could check whether it works
357 and update target_regsets accordingly, maybe by moving target_regsets
358 to linux_target_ops and set the right one there, rather than having to
359 modify the target_regsets global. */
361 struct regset_info target_regsets
[] =
363 #ifdef HAVE_PTRACE_GETREGS
364 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
366 x86_fill_gregset
, x86_store_gregset
},
367 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
368 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
370 # ifdef HAVE_PTRACE_GETFPXREGS
371 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
373 x86_fill_fpxregset
, x86_store_fpxregset
},
376 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
378 x86_fill_fpregset
, x86_store_fpregset
},
379 #endif /* HAVE_PTRACE_GETREGS */
380 { 0, 0, 0, -1, -1, NULL
, NULL
}
384 x86_get_pc (struct regcache
*regcache
)
386 int use_64bit
= register_size (0) == 8;
391 collect_register_by_name (regcache
, "rip", &pc
);
392 return (CORE_ADDR
) pc
;
397 collect_register_by_name (regcache
, "eip", &pc
);
398 return (CORE_ADDR
) pc
;
403 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
405 int use_64bit
= register_size (0) == 8;
409 unsigned long newpc
= pc
;
410 supply_register_by_name (regcache
, "rip", &newpc
);
414 unsigned int newpc
= pc
;
415 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3 (0xCC), one byte.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
423 x86_breakpoint_at (CORE_ADDR pc
)
427 (*the_target
->read_memory
) (pc
, &c
, 1);
434 /* Support for debug registers. */
437 x86_linux_dr_get (ptid_t ptid
, int regnum
)
442 tid
= ptid_get_lwp (ptid
);
445 value
= ptrace (PTRACE_PEEKUSER
, tid
,
446 offsetof (struct user
, u_debugreg
[regnum
]), 0);
448 error ("Couldn't read debug register");
454 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
458 tid
= ptid_get_lwp (ptid
);
461 ptrace (PTRACE_POKEUSER
, tid
,
462 offsetof (struct user
, u_debugreg
[regnum
]), value
);
464 error ("Couldn't write debug register");
468 update_debug_registers_callback (struct inferior_list_entry
*entry
,
471 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
472 int pid
= *(int *) pid_p
;
474 /* Only update the threads of this process. */
475 if (pid_of (lwp
) == pid
)
477 /* The actual update is done later just before resuming the lwp,
478 we just mark that the registers need updating. */
479 lwp
->arch_private
->debug_registers_changed
= 1;
481 /* If the lwp isn't stopped, force it to momentarily pause, so
482 we can update its debug registers. */
484 linux_stop_lwp (lwp
);
490 /* Update the inferior's debug register REGNUM from STATE. */
493 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
495 /* Only update the threads of this process. */
496 int pid
= pid_of (get_thread_lwp (current_inferior
));
498 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
499 fatal ("Invalid debug register %d", regnum
);
501 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
504 /* Return the inferior's debug register REGNUM. */
507 i386_dr_low_get_addr (int regnum
)
509 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
510 ptid_t ptid
= ptid_of (lwp
);
512 /* DR6 and DR7 are retrieved with some other way. */
513 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
515 return x86_linux_dr_get (ptid
, regnum
);
518 /* Update the inferior's DR7 debug control register from STATE. */
521 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
523 /* Only update the threads of this process. */
524 int pid
= pid_of (get_thread_lwp (current_inferior
));
526 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
529 /* Return the inferior's DR7 debug control register. */
532 i386_dr_low_get_control (void)
534 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
535 ptid_t ptid
= ptid_of (lwp
);
537 return x86_linux_dr_get (ptid
, DR_CONTROL
);
540 /* Get the value of the DR6 debug status register from the inferior
541 and record it in STATE. */
544 i386_dr_low_get_status (void)
546 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
547 ptid_t ptid
= ptid_of (lwp
);
549 return x86_linux_dr_get (ptid
, DR_STATUS
);
552 /* Breakpoint/Watchpoint support. */
555 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
557 struct process_info
*proc
= current_process ();
564 ret
= prepare_to_access_memory ();
567 ret
= set_gdb_breakpoint_at (addr
);
568 done_accessing_memory ();
574 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
583 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
585 struct process_info
*proc
= current_process ();
592 ret
= prepare_to_access_memory ();
595 ret
= delete_gdb_breakpoint_at (addr
);
596 done_accessing_memory ();
602 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
611 x86_stopped_by_watchpoint (void)
613 struct process_info
*proc
= current_process ();
614 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
618 x86_stopped_data_address (void)
620 struct process_info
*proc
= current_process ();
622 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
628 /* Called when a new process is created. */
630 static struct arch_process_info
*
631 x86_linux_new_process (void)
633 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
635 i386_low_init_dregs (&info
->debug_reg_state
);
640 /* Called when a new thread is detected. */
642 static struct arch_lwp_info
*
643 x86_linux_new_thread (void)
645 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
647 info
->debug_registers_changed
= 1;
652 /* Called when resuming a thread.
653 If the debug regs have changed, update the thread's copies. */
656 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
658 ptid_t ptid
= ptid_of (lwp
);
659 int clear_status
= 0;
661 if (lwp
->arch_private
->debug_registers_changed
)
664 int pid
= ptid_get_pid (ptid
);
665 struct process_info
*proc
= find_process_pid (pid
);
666 struct i386_debug_reg_state
*state
667 = &proc
->private->arch_private
->debug_reg_state
;
669 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
670 if (state
->dr_ref_count
[i
] > 0)
672 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
674 /* If we're setting a watchpoint, any change the inferior
675 had done itself to the debug registers needs to be
676 discarded, otherwise, i386_low_stopped_data_address can
681 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
683 lwp
->arch_private
->debug_registers_changed
= 0;
686 if (clear_status
|| lwp
->stopped_by_watchpoint
)
687 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
690 /* When GDBSERVER is built as a 64-bit application on linux, the
691 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
692 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
693 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
694 conversion in-place ourselves. */
696 /* These types below (compat_*) define a siginfo type that is layout
697 compatible with the siginfo type exported by the 32-bit userspace
/* These types define a siginfo layout compatible with the one exported
   by the 32-bit userspace ABI, so a 64-bit gdbserver can translate
   siginfo for a 32-bit inferior.  The whole structure is 128 bytes,
   matching the kernel's compat siginfo.  */
typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad the union out to 128 bytes total (3 ints already used).  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
779 #define cpt_si_pid _sifields._kill._pid
780 #define cpt_si_uid _sifields._kill._uid
781 #define cpt_si_timerid _sifields._timer._tid
782 #define cpt_si_overrun _sifields._timer._overrun
783 #define cpt_si_status _sifields._sigchld._status
784 #define cpt_si_utime _sifields._sigchld._utime
785 #define cpt_si_stime _sifields._sigchld._stime
786 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
787 #define cpt_si_addr _sifields._sigfault._addr
788 #define cpt_si_band _sifields._sigpoll._band
789 #define cpt_si_fd _sifields._sigpoll._fd
/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
801 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
803 memset (to
, 0, sizeof (*to
));
805 to
->si_signo
= from
->si_signo
;
806 to
->si_errno
= from
->si_errno
;
807 to
->si_code
= from
->si_code
;
809 if (to
->si_code
== SI_TIMER
)
811 to
->cpt_si_timerid
= from
->si_timerid
;
812 to
->cpt_si_overrun
= from
->si_overrun
;
813 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
815 else if (to
->si_code
== SI_USER
)
817 to
->cpt_si_pid
= from
->si_pid
;
818 to
->cpt_si_uid
= from
->si_uid
;
820 else if (to
->si_code
< 0)
822 to
->cpt_si_pid
= from
->si_pid
;
823 to
->cpt_si_uid
= from
->si_uid
;
824 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
828 switch (to
->si_signo
)
831 to
->cpt_si_pid
= from
->si_pid
;
832 to
->cpt_si_uid
= from
->si_uid
;
833 to
->cpt_si_status
= from
->si_status
;
834 to
->cpt_si_utime
= from
->si_utime
;
835 to
->cpt_si_stime
= from
->si_stime
;
841 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
844 to
->cpt_si_band
= from
->si_band
;
845 to
->cpt_si_fd
= from
->si_fd
;
848 to
->cpt_si_pid
= from
->si_pid
;
849 to
->cpt_si_uid
= from
->si_uid
;
850 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
857 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
859 memset (to
, 0, sizeof (*to
));
861 to
->si_signo
= from
->si_signo
;
862 to
->si_errno
= from
->si_errno
;
863 to
->si_code
= from
->si_code
;
865 if (to
->si_code
== SI_TIMER
)
867 to
->si_timerid
= from
->cpt_si_timerid
;
868 to
->si_overrun
= from
->cpt_si_overrun
;
869 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
871 else if (to
->si_code
== SI_USER
)
873 to
->si_pid
= from
->cpt_si_pid
;
874 to
->si_uid
= from
->cpt_si_uid
;
876 else if (to
->si_code
< 0)
878 to
->si_pid
= from
->cpt_si_pid
;
879 to
->si_uid
= from
->cpt_si_uid
;
880 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
884 switch (to
->si_signo
)
887 to
->si_pid
= from
->cpt_si_pid
;
888 to
->si_uid
= from
->cpt_si_uid
;
889 to
->si_status
= from
->cpt_si_status
;
890 to
->si_utime
= from
->cpt_si_utime
;
891 to
->si_stime
= from
->cpt_si_stime
;
897 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
900 to
->si_band
= from
->cpt_si_band
;
901 to
->si_fd
= from
->cpt_si_fd
;
904 to
->si_pid
= from
->cpt_si_pid
;
905 to
->si_uid
= from
->cpt_si_uid
;
906 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
912 #endif /* __x86_64__ */
914 /* Convert a native/host siginfo object, into/from the siginfo in the
915 layout of the inferiors' architecture. Returns true if any
916 conversion was done; false otherwise. If DIRECTION is 1, then copy
917 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
921 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
924 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
925 if (register_size (0) == 4)
927 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
928 fatal ("unexpected difference in siginfo");
931 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
933 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
944 /* Update gdbserver_xmltarget. */
947 x86_linux_update_xmltarget (void)
950 struct regset_info
*regset
;
951 static unsigned long long xcr0
;
952 static int have_ptrace_getregset
= -1;
953 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
954 static int have_ptrace_getfpxregs
= -1;
957 if (!current_inferior
)
960 /* Before changing the register cache internal layout or the target
961 regsets, flush the contents of the current valid caches back to
963 regcache_invalidate ();
965 pid
= pid_of (get_thread_lwp (current_inferior
));
967 if (num_xmm_registers
== 8)
968 init_registers_i386_linux ();
970 init_registers_amd64_linux ();
973 # ifdef HAVE_PTRACE_GETFPXREGS
974 if (have_ptrace_getfpxregs
== -1)
976 elf_fpxregset_t fpxregs
;
978 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
980 have_ptrace_getfpxregs
= 0;
981 x86_xcr0
= I386_XSTATE_X87_MASK
;
983 /* Disable PTRACE_GETFPXREGS. */
984 for (regset
= target_regsets
;
985 regset
->fill_function
!= NULL
; regset
++)
986 if (regset
->get_request
== PTRACE_GETFPXREGS
)
993 have_ptrace_getfpxregs
= 1;
996 if (!have_ptrace_getfpxregs
)
998 init_registers_i386_mmx_linux ();
1002 init_registers_i386_linux ();
1008 /* Don't use XML. */
1010 if (num_xmm_registers
== 8)
1011 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1013 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1015 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1018 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1023 /* Check if XSAVE extended state is supported. */
1024 if (have_ptrace_getregset
== -1)
1026 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1029 iov
.iov_base
= xstateregs
;
1030 iov
.iov_len
= sizeof (xstateregs
);
1032 /* Check if PTRACE_GETREGSET works. */
1033 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1036 have_ptrace_getregset
= 0;
1040 have_ptrace_getregset
= 1;
1042 /* Get XCR0 from XSAVE extended state at byte 464. */
1043 xcr0
= xstateregs
[464 / sizeof (long long)];
1045 /* Use PTRACE_GETREGSET if it is available. */
1046 for (regset
= target_regsets
;
1047 regset
->fill_function
!= NULL
; regset
++)
1048 if (regset
->get_request
== PTRACE_GETREGSET
)
1049 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1050 else if (regset
->type
!= GENERAL_REGS
)
1054 if (have_ptrace_getregset
)
1056 /* AVX is the highest feature we support. */
1057 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1062 /* I386 has 8 xmm regs. */
1063 if (num_xmm_registers
== 8)
1064 init_registers_i386_avx_linux ();
1066 init_registers_amd64_avx_linux ();
1068 init_registers_i386_avx_linux ();
1074 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1075 PTRACE_GETREGSET. */
1078 x86_linux_process_qsupported (const char *query
)
1080 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1081 with "i386" in qSupported query, it supports x86 XML target
1084 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1086 char *copy
= xstrdup (query
+ 13);
1089 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1091 if (strcmp (p
, "i386") == 0)
1101 x86_linux_update_xmltarget ();
1104 /* Initialize gdbserver for the architecture of the inferior. */
1107 x86_arch_setup (void)
1110 int pid
= pid_of (get_thread_lwp (current_inferior
));
1111 int use_64bit
= linux_pid_exe_is_elf_64_file (pid
);
1115 /* This can only happen if /proc/<pid>/exe is unreadable,
1116 but "that can't happen" if we've gotten this far.
1117 Fall through and assume this is a 32-bit program. */
1121 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1122 the_low_target
.num_regs
= -1;
1123 the_low_target
.regmap
= NULL
;
1124 the_low_target
.cannot_fetch_register
= NULL
;
1125 the_low_target
.cannot_store_register
= NULL
;
1127 /* Amd64 has 16 xmm regs. */
1128 num_xmm_registers
= 16;
1130 x86_linux_update_xmltarget ();
1135 /* Ok we have a 32-bit inferior. */
1137 the_low_target
.num_regs
= I386_NUM_REGS
;
1138 the_low_target
.regmap
= i386_regmap
;
1139 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1140 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1142 /* I386 has 8 xmm regs. */
1143 num_xmm_registers
= 8;
1145 x86_linux_update_xmltarget ();
1149 x86_supports_tracepoints (void)
1155 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1157 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hex byte values
   (e.g. "48 89 e6"), into BUF.  Return the number of bytes written.  */
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: end of the byte list.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1183 /* Build a jump pad that saves registers and calls a collection
1184 function. Writes a jump instruction to the jump pad to
1185 JJUMPAD_INSN. The caller is responsible to write it in at the
1186 tracepoint address. */
1189 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1190 CORE_ADDR collector
,
1193 CORE_ADDR
*jump_entry
,
1194 CORE_ADDR
*trampoline
,
1195 ULONGEST
*trampoline_size
,
1196 unsigned char *jjump_pad_insn
,
1197 ULONGEST
*jjump_pad_insn_size
,
1198 CORE_ADDR
*adjusted_insn_addr
,
1199 CORE_ADDR
*adjusted_insn_addr_end
,
1202 unsigned char buf
[40];
1206 CORE_ADDR buildaddr
= *jump_entry
;
1208 /* Build the jump pad. */
1210 /* First, do tracepoint data collection. Save registers. */
1212 /* Need to ensure stack pointer saved first. */
1213 buf
[i
++] = 0x54; /* push %rsp */
1214 buf
[i
++] = 0x55; /* push %rbp */
1215 buf
[i
++] = 0x57; /* push %rdi */
1216 buf
[i
++] = 0x56; /* push %rsi */
1217 buf
[i
++] = 0x52; /* push %rdx */
1218 buf
[i
++] = 0x51; /* push %rcx */
1219 buf
[i
++] = 0x53; /* push %rbx */
1220 buf
[i
++] = 0x50; /* push %rax */
1221 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1222 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1223 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1224 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1225 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1226 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1227 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1228 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1229 buf
[i
++] = 0x9c; /* pushfq */
1230 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1232 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1233 i
+= sizeof (unsigned long);
1234 buf
[i
++] = 0x57; /* push %rdi */
1235 append_insns (&buildaddr
, i
, buf
);
1237 /* Stack space for the collecting_t object. */
1239 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1240 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1241 memcpy (buf
+ i
, &tpoint
, 8);
1243 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1244 i
+= push_opcode (&buf
[i
],
1245 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1246 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1247 append_insns (&buildaddr
, i
, buf
);
1251 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1252 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1254 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1255 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1256 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1257 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1258 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1259 append_insns (&buildaddr
, i
, buf
);
1261 /* Set up the gdb_collect call. */
1262 /* At this point, (stack pointer + 0x18) is the base of our saved
1266 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1267 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1269 /* tpoint address may be 64-bit wide. */
1270 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1271 memcpy (buf
+ i
, &tpoint
, 8);
1273 append_insns (&buildaddr
, i
, buf
);
1275 /* The collector function being in the shared library, may be
1276 >31-bits away off the jump pad. */
1278 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1279 memcpy (buf
+ i
, &collector
, 8);
1281 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1282 append_insns (&buildaddr
, i
, buf
);
1284 /* Clear the spin-lock. */
1286 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1287 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1288 memcpy (buf
+ i
, &lockaddr
, 8);
1290 append_insns (&buildaddr
, i
, buf
);
1292 /* Remove stack that had been used for the collect_t object. */
1294 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1295 append_insns (&buildaddr
, i
, buf
);
1297 /* Restore register state. */
1299 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1303 buf
[i
++] = 0x9d; /* popfq */
1304 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1305 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1306 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1307 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1308 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1309 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1310 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1311 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1312 buf
[i
++] = 0x58; /* pop %rax */
1313 buf
[i
++] = 0x5b; /* pop %rbx */
1314 buf
[i
++] = 0x59; /* pop %rcx */
1315 buf
[i
++] = 0x5a; /* pop %rdx */
1316 buf
[i
++] = 0x5e; /* pop %rsi */
1317 buf
[i
++] = 0x5f; /* pop %rdi */
1318 buf
[i
++] = 0x5d; /* pop %rbp */
1319 buf
[i
++] = 0x5c; /* pop %rsp */
1320 append_insns (&buildaddr
, i
, buf
);
1322 /* Now, adjust the original instruction to execute in the jump
1324 *adjusted_insn_addr
= buildaddr
;
1325 relocate_instruction (&buildaddr
, tpaddr
);
1326 *adjusted_insn_addr_end
= buildaddr
;
1328 /* Finally, write a jump back to the program. */
1330 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1331 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1334 "E.Jump back from jump pad too far from tracepoint "
1335 "(offset 0x%" PRIx64
" > int32).", loffset
);
1339 offset
= (int) loffset
;
1340 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1341 memcpy (buf
+ 1, &offset
, 4);
1342 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1344 /* The jump pad is now built. Wire in a jump to our jump pad. This
1345 is always done last (by our caller actually), so that we can
1346 install fast tracepoints with threads running. This relies on
1347 the agent's atomic write support. */
1348 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1349 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1352 "E.Jump pad too far from tracepoint "
1353 "(offset 0x%" PRIx64
" > int32).", loffset
);
1357 offset
= (int) loffset
;
1359 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1360 memcpy (buf
+ 1, &offset
, 4);
1361 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1362 *jjump_pad_insn_size
= sizeof (jump_insn
);
1364 /* Return the end address of our pad. */
1365 *jump_entry
= buildaddr
;
1370 #endif /* __x86_64__ */
1372 /* Build a jump pad that saves registers and calls a collection
1373 function. Writes a jump instruction to the jump pad to
1374 JJUMPAD_INSN. The caller is responsible to write it in at the
1375 tracepoint address. */
1378 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1379 CORE_ADDR collector
,
1382 CORE_ADDR
*jump_entry
,
1383 CORE_ADDR
*trampoline
,
1384 ULONGEST
*trampoline_size
,
1385 unsigned char *jjump_pad_insn
,
1386 ULONGEST
*jjump_pad_insn_size
,
1387 CORE_ADDR
*adjusted_insn_addr
,
1388 CORE_ADDR
*adjusted_insn_addr_end
,
1391 unsigned char buf
[0x100];
1393 CORE_ADDR buildaddr
= *jump_entry
;
1395 /* Build the jump pad. */
1397 /* First, do tracepoint data collection. Save registers. */
1399 buf
[i
++] = 0x60; /* pushad */
1400 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1401 *((int *)(buf
+ i
)) = (int) tpaddr
;
1403 buf
[i
++] = 0x9c; /* pushf */
1404 buf
[i
++] = 0x1e; /* push %ds */
1405 buf
[i
++] = 0x06; /* push %es */
1406 buf
[i
++] = 0x0f; /* push %fs */
1408 buf
[i
++] = 0x0f; /* push %gs */
1410 buf
[i
++] = 0x16; /* push %ss */
1411 buf
[i
++] = 0x0e; /* push %cs */
1412 append_insns (&buildaddr
, i
, buf
);
1414 /* Stack space for the collecting_t object. */
1416 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1418 /* Build the object. */
1419 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1420 memcpy (buf
+ i
, &tpoint
, 4);
1422 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1424 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1425 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1426 append_insns (&buildaddr
, i
, buf
);
1428 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1429 If we cared for it, this could be using xchg alternatively. */
1432 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1433 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1435 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1437 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1438 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1439 append_insns (&buildaddr
, i
, buf
);
1442 /* Set up arguments to the gdb_collect call. */
1444 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1445 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1446 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1447 append_insns (&buildaddr
, i
, buf
);
1450 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1451 append_insns (&buildaddr
, i
, buf
);
1454 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1455 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1457 append_insns (&buildaddr
, i
, buf
);
1459 buf
[0] = 0xe8; /* call <reladdr> */
1460 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1461 memcpy (buf
+ 1, &offset
, 4);
1462 append_insns (&buildaddr
, 5, buf
);
1463 /* Clean up after the call. */
1464 buf
[0] = 0x83; /* add $0x8,%esp */
1467 append_insns (&buildaddr
, 3, buf
);
1470 /* Clear the spin-lock. This would need the LOCK prefix on older
1473 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1474 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1475 memcpy (buf
+ i
, &lockaddr
, 4);
1477 append_insns (&buildaddr
, i
, buf
);
1480 /* Remove stack that had been used for the collect_t object. */
1482 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1483 append_insns (&buildaddr
, i
, buf
);
1486 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1489 buf
[i
++] = 0x17; /* pop %ss */
1490 buf
[i
++] = 0x0f; /* pop %gs */
1492 buf
[i
++] = 0x0f; /* pop %fs */
1494 buf
[i
++] = 0x07; /* pop %es */
1495 buf
[i
++] = 0x1f; /* pop %ds */
1496 buf
[i
++] = 0x9d; /* popf */
1497 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1500 buf
[i
++] = 0x61; /* popad */
1501 append_insns (&buildaddr
, i
, buf
);
1503 /* Now, adjust the original instruction to execute in the jump
1505 *adjusted_insn_addr
= buildaddr
;
1506 relocate_instruction (&buildaddr
, tpaddr
);
1507 *adjusted_insn_addr_end
= buildaddr
;
1509 /* Write the jump back to the program. */
1510 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1511 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1512 memcpy (buf
+ 1, &offset
, 4);
1513 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1515 /* The jump pad is now built. Wire in a jump to our jump pad. This
1516 is always done last (by our caller actually), so that we can
1517 install fast tracepoints with threads running. This relies on
1518 the agent's atomic write support. */
1521 /* Create a trampoline. */
1522 *trampoline_size
= sizeof (jump_insn
);
1523 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1525 /* No trampoline space available. */
1527 "E.Cannot allocate trampoline space needed for fast "
1528 "tracepoints on 4-byte instructions.");
1532 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1533 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1534 memcpy (buf
+ 1, &offset
, 4);
1535 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1537 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1538 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1539 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1540 memcpy (buf
+ 2, &offset
, 2);
1541 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1542 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1546 /* Else use a 32-bit relative jump instruction. */
1547 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1548 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1549 memcpy (buf
+ 1, &offset
, 4);
1550 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1551 *jjump_pad_insn_size
= sizeof (jump_insn
);
1554 /* Return the end address of our pad. */
1555 *jump_entry
= buildaddr
;
1561 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1562 CORE_ADDR collector
,
1565 CORE_ADDR
*jump_entry
,
1566 CORE_ADDR
*trampoline
,
1567 ULONGEST
*trampoline_size
,
1568 unsigned char *jjump_pad_insn
,
1569 ULONGEST
*jjump_pad_insn_size
,
1570 CORE_ADDR
*adjusted_insn_addr
,
1571 CORE_ADDR
*adjusted_insn_addr_end
,
1575 if (register_size (0) == 8)
1576 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1577 collector
, lockaddr
,
1578 orig_size
, jump_entry
,
1579 trampoline
, trampoline_size
,
1581 jjump_pad_insn_size
,
1583 adjusted_insn_addr_end
,
1587 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1588 collector
, lockaddr
,
1589 orig_size
, jump_entry
,
1590 trampoline
, trampoline_size
,
1592 jjump_pad_insn_size
,
1594 adjusted_insn_addr_end
,
1598 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1602 x86_get_min_fast_tracepoint_insn_len (void)
1604 static int warned_about_fast_tracepoints
= 0;
1607 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1608 used for fast tracepoints. */
1609 if (register_size (0) == 8)
1613 if (agent_loaded_p ())
1615 char errbuf
[IPA_BUFSIZ
];
1619 /* On x86, if trampolines are available, then 4-byte jump instructions
1620 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1621 with a 4-byte offset are used instead. */
1622 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1626 /* GDB has no channel to explain to user why a shorter fast
1627 tracepoint is not possible, but at least make GDBserver
1628 mention that something has gone awry. */
1629 if (!warned_about_fast_tracepoints
)
1631 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1632 warned_about_fast_tracepoints
= 1;
1639 /* Indicate that the minimum length is currently unknown since the IPA
1640 has not loaded yet. */
1646 add_insns (unsigned char *start
, int len
)
1648 CORE_ADDR buildaddr
= current_insn_ptr
;
1651 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1652 len
, paddress (buildaddr
));
1654 append_insns (&buildaddr
, len
, start
);
1655 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1699 amd64_emit_prologue (void)
1701 EMIT_ASM (amd64_prologue
,
1703 "movq %rsp,%rbp\n\t"
1704 "sub $0x20,%rsp\n\t"
1705 "movq %rdi,-8(%rbp)\n\t"
1706 "movq %rsi,-16(%rbp)");
1711 amd64_emit_epilogue (void)
1713 EMIT_ASM (amd64_epilogue
,
1714 "movq -16(%rbp),%rdi\n\t"
1715 "movq %rax,(%rdi)\n\t"
1722 amd64_emit_add (void)
1724 EMIT_ASM (amd64_add
,
1725 "add (%rsp),%rax\n\t"
1726 "lea 0x8(%rsp),%rsp");
1730 amd64_emit_sub (void)
1732 EMIT_ASM (amd64_sub
,
1733 "sub %rax,(%rsp)\n\t"
1738 amd64_emit_mul (void)
1744 amd64_emit_lsh (void)
1750 amd64_emit_rsh_signed (void)
1756 amd64_emit_rsh_unsigned (void)
1762 amd64_emit_ext (int arg
)
1767 EMIT_ASM (amd64_ext_8
,
1773 EMIT_ASM (amd64_ext_16
,
1778 EMIT_ASM (amd64_ext_32
,
1787 amd64_emit_log_not (void)
1789 EMIT_ASM (amd64_log_not
,
1790 "test %rax,%rax\n\t"
1796 amd64_emit_bit_and (void)
1798 EMIT_ASM (amd64_and
,
1799 "and (%rsp),%rax\n\t"
1800 "lea 0x8(%rsp),%rsp");
1804 amd64_emit_bit_or (void)
1807 "or (%rsp),%rax\n\t"
1808 "lea 0x8(%rsp),%rsp");
1812 amd64_emit_bit_xor (void)
1814 EMIT_ASM (amd64_xor
,
1815 "xor (%rsp),%rax\n\t"
1816 "lea 0x8(%rsp),%rsp");
1820 amd64_emit_bit_not (void)
1822 EMIT_ASM (amd64_bit_not
,
1823 "xorq $0xffffffffffffffff,%rax");
1827 amd64_emit_equal (void)
1829 EMIT_ASM (amd64_equal
,
1830 "cmp %rax,(%rsp)\n\t"
1831 "je .Lamd64_equal_true\n\t"
1833 "jmp .Lamd64_equal_end\n\t"
1834 ".Lamd64_equal_true:\n\t"
1836 ".Lamd64_equal_end:\n\t"
1837 "lea 0x8(%rsp),%rsp");
1841 amd64_emit_less_signed (void)
1843 EMIT_ASM (amd64_less_signed
,
1844 "cmp %rax,(%rsp)\n\t"
1845 "jl .Lamd64_less_signed_true\n\t"
1847 "jmp .Lamd64_less_signed_end\n\t"
1848 ".Lamd64_less_signed_true:\n\t"
1850 ".Lamd64_less_signed_end:\n\t"
1851 "lea 0x8(%rsp),%rsp");
1855 amd64_emit_less_unsigned (void)
1857 EMIT_ASM (amd64_less_unsigned
,
1858 "cmp %rax,(%rsp)\n\t"
1859 "jb .Lamd64_less_unsigned_true\n\t"
1861 "jmp .Lamd64_less_unsigned_end\n\t"
1862 ".Lamd64_less_unsigned_true:\n\t"
1864 ".Lamd64_less_unsigned_end:\n\t"
1865 "lea 0x8(%rsp),%rsp");
1869 amd64_emit_ref (int size
)
1874 EMIT_ASM (amd64_ref1
,
1878 EMIT_ASM (amd64_ref2
,
1882 EMIT_ASM (amd64_ref4
,
1883 "movl (%rax),%eax");
1886 EMIT_ASM (amd64_ref8
,
1887 "movq (%rax),%rax");
1893 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1895 EMIT_ASM (amd64_if_goto
,
1899 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1907 amd64_emit_goto (int *offset_p
, int *size_p
)
1909 EMIT_ASM (amd64_goto
,
1910 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1918 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1920 int diff
= (to
- (from
+ size
));
1921 unsigned char buf
[sizeof (int)];
1929 memcpy (buf
, &diff
, sizeof (int));
1930 write_inferior_memory (from
, buf
, sizeof (int));
1934 amd64_emit_const (LONGEST num
)
1936 unsigned char buf
[16];
1938 CORE_ADDR buildaddr
= current_insn_ptr
;
1941 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1942 memcpy (&buf
[i
], &num
, sizeof (num
));
1944 append_insns (&buildaddr
, i
, buf
);
1945 current_insn_ptr
= buildaddr
;
1949 amd64_emit_call (CORE_ADDR fn
)
1951 unsigned char buf
[16];
1953 CORE_ADDR buildaddr
;
1956 /* The destination function being in the shared library, may be
1957 >31-bits away off the compiled code pad. */
1959 buildaddr
= current_insn_ptr
;
1961 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1965 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1967 /* Offset is too large for a call. Use callq, but that requires
1968 a register, so avoid it if possible. Use r10, since it is
1969 call-clobbered, we don't have to push/pop it. */
1970 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1972 memcpy (buf
+ i
, &fn
, 8);
1974 buf
[i
++] = 0xff; /* callq *%r10 */
1979 int offset32
= offset64
; /* we know we can't overflow here. */
1980 memcpy (buf
+ i
, &offset32
, 4);
1984 append_insns (&buildaddr
, i
, buf
);
1985 current_insn_ptr
= buildaddr
;
1989 amd64_emit_reg (int reg
)
1991 unsigned char buf
[16];
1993 CORE_ADDR buildaddr
;
1995 /* Assume raw_regs is still in %rdi. */
1996 buildaddr
= current_insn_ptr
;
1998 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1999 memcpy (&buf
[i
], ®
, sizeof (reg
));
2001 append_insns (&buildaddr
, i
, buf
);
2002 current_insn_ptr
= buildaddr
;
2003 amd64_emit_call (get_raw_reg_func_addr ());
2007 amd64_emit_pop (void)
2009 EMIT_ASM (amd64_pop
,
2014 amd64_emit_stack_flush (void)
2016 EMIT_ASM (amd64_stack_flush
,
2021 amd64_emit_zero_ext (int arg
)
2026 EMIT_ASM (amd64_zero_ext_8
,
2030 EMIT_ASM (amd64_zero_ext_16
,
2031 "and $0xffff,%rax");
2034 EMIT_ASM (amd64_zero_ext_32
,
2035 "mov $0xffffffff,%rcx\n\t"
2044 amd64_emit_swap (void)
2046 EMIT_ASM (amd64_swap
,
2053 amd64_emit_stack_adjust (int n
)
2055 unsigned char buf
[16];
2057 CORE_ADDR buildaddr
= current_insn_ptr
;
2060 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2064 /* This only handles adjustments up to 16, but we don't expect any more. */
2066 append_insns (&buildaddr
, i
, buf
);
2067 current_insn_ptr
= buildaddr
;
2070 /* FN's prototype is `LONGEST(*fn)(int)'. */
2073 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2075 unsigned char buf
[16];
2077 CORE_ADDR buildaddr
;
2079 buildaddr
= current_insn_ptr
;
2081 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2082 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2084 append_insns (&buildaddr
, i
, buf
);
2085 current_insn_ptr
= buildaddr
;
2086 amd64_emit_call (fn
);
2089 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2092 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2094 unsigned char buf
[16];
2096 CORE_ADDR buildaddr
;
2098 buildaddr
= current_insn_ptr
;
2100 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2101 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2103 append_insns (&buildaddr
, i
, buf
);
2104 current_insn_ptr
= buildaddr
;
2105 EMIT_ASM (amd64_void_call_2_a
,
2106 /* Save away a copy of the stack top. */
2108 /* Also pass top as the second argument. */
2110 amd64_emit_call (fn
);
2111 EMIT_ASM (amd64_void_call_2_b
,
2112 /* Restore the stack top, %rax may have been trashed. */
2117 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2120 "cmp %rax,(%rsp)\n\t"
2121 "jne .Lamd64_eq_fallthru\n\t"
2122 "lea 0x8(%rsp),%rsp\n\t"
2124 /* jmp, but don't trust the assembler to choose the right jump */
2125 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2126 ".Lamd64_eq_fallthru:\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2137 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2140 "cmp %rax,(%rsp)\n\t"
2141 "je .Lamd64_ne_fallthru\n\t"
2142 "lea 0x8(%rsp),%rsp\n\t"
2144 /* jmp, but don't trust the assembler to choose the right jump */
2145 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2146 ".Lamd64_ne_fallthru:\n\t"
2147 "lea 0x8(%rsp),%rsp\n\t"
2157 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2160 "cmp %rax,(%rsp)\n\t"
2161 "jnl .Lamd64_lt_fallthru\n\t"
2162 "lea 0x8(%rsp),%rsp\n\t"
2164 /* jmp, but don't trust the assembler to choose the right jump */
2165 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2166 ".Lamd64_lt_fallthru:\n\t"
2167 "lea 0x8(%rsp),%rsp\n\t"
2177 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2180 "cmp %rax,(%rsp)\n\t"
2181 "jnle .Lamd64_le_fallthru\n\t"
2182 "lea 0x8(%rsp),%rsp\n\t"
2184 /* jmp, but don't trust the assembler to choose the right jump */
2185 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2186 ".Lamd64_le_fallthru:\n\t"
2187 "lea 0x8(%rsp),%rsp\n\t"
2197 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2200 "cmp %rax,(%rsp)\n\t"
2201 "jng .Lamd64_gt_fallthru\n\t"
2202 "lea 0x8(%rsp),%rsp\n\t"
2204 /* jmp, but don't trust the assembler to choose the right jump */
2205 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2206 ".Lamd64_gt_fallthru:\n\t"
2207 "lea 0x8(%rsp),%rsp\n\t"
2217 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2220 "cmp %rax,(%rsp)\n\t"
2221 "jnge .Lamd64_ge_fallthru\n\t"
2222 ".Lamd64_ge_jump:\n\t"
2223 "lea 0x8(%rsp),%rsp\n\t"
2225 /* jmp, but don't trust the assembler to choose the right jump */
2226 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2227 ".Lamd64_ge_fallthru:\n\t"
2228 "lea 0x8(%rsp),%rsp\n\t"
2237 struct emit_ops amd64_emit_ops
=
2239 amd64_emit_prologue
,
2240 amd64_emit_epilogue
,
2245 amd64_emit_rsh_signed
,
2246 amd64_emit_rsh_unsigned
,
2254 amd64_emit_less_signed
,
2255 amd64_emit_less_unsigned
,
2259 amd64_write_goto_address
,
2264 amd64_emit_stack_flush
,
2265 amd64_emit_zero_ext
,
2267 amd64_emit_stack_adjust
,
2268 amd64_emit_int_call_1
,
2269 amd64_emit_void_call_2
,
2278 #endif /* __x86_64__ */
2281 i386_emit_prologue (void)
2283 EMIT_ASM32 (i386_prologue
,
2287 /* At this point, the raw regs base address is at 8(%ebp), and the
2288 value pointer is at 12(%ebp). */
2292 i386_emit_epilogue (void)
2294 EMIT_ASM32 (i386_epilogue
,
2295 "mov 12(%ebp),%ecx\n\t"
2296 "mov %eax,(%ecx)\n\t"
2297 "mov %ebx,0x4(%ecx)\n\t"
2305 i386_emit_add (void)
2307 EMIT_ASM32 (i386_add
,
2308 "add (%esp),%eax\n\t"
2309 "adc 0x4(%esp),%ebx\n\t"
2310 "lea 0x8(%esp),%esp");
2314 i386_emit_sub (void)
2316 EMIT_ASM32 (i386_sub
,
2317 "subl %eax,(%esp)\n\t"
2318 "sbbl %ebx,4(%esp)\n\t"
2324 i386_emit_mul (void)
2330 i386_emit_lsh (void)
2336 i386_emit_rsh_signed (void)
2342 i386_emit_rsh_unsigned (void)
2348 i386_emit_ext (int arg
)
2353 EMIT_ASM32 (i386_ext_8
,
2356 "movl %eax,%ebx\n\t"
2360 EMIT_ASM32 (i386_ext_16
,
2362 "movl %eax,%ebx\n\t"
2366 EMIT_ASM32 (i386_ext_32
,
2367 "movl %eax,%ebx\n\t"
2376 i386_emit_log_not (void)
2378 EMIT_ASM32 (i386_log_not
,
2380 "test %eax,%eax\n\t"
2387 i386_emit_bit_and (void)
2389 EMIT_ASM32 (i386_and
,
2390 "and (%esp),%eax\n\t"
2391 "and 0x4(%esp),%ebx\n\t"
2392 "lea 0x8(%esp),%esp");
2396 i386_emit_bit_or (void)
2398 EMIT_ASM32 (i386_or
,
2399 "or (%esp),%eax\n\t"
2400 "or 0x4(%esp),%ebx\n\t"
2401 "lea 0x8(%esp),%esp");
2405 i386_emit_bit_xor (void)
2407 EMIT_ASM32 (i386_xor
,
2408 "xor (%esp),%eax\n\t"
2409 "xor 0x4(%esp),%ebx\n\t"
2410 "lea 0x8(%esp),%esp");
2414 i386_emit_bit_not (void)
2416 EMIT_ASM32 (i386_bit_not
,
2417 "xor $0xffffffff,%eax\n\t"
2418 "xor $0xffffffff,%ebx\n\t");
2422 i386_emit_equal (void)
2424 EMIT_ASM32 (i386_equal
,
2425 "cmpl %ebx,4(%esp)\n\t"
2426 "jne .Li386_equal_false\n\t"
2427 "cmpl %eax,(%esp)\n\t"
2428 "je .Li386_equal_true\n\t"
2429 ".Li386_equal_false:\n\t"
2431 "jmp .Li386_equal_end\n\t"
2432 ".Li386_equal_true:\n\t"
2434 ".Li386_equal_end:\n\t"
2436 "lea 0x8(%esp),%esp");
2440 i386_emit_less_signed (void)
2442 EMIT_ASM32 (i386_less_signed
,
2443 "cmpl %ebx,4(%esp)\n\t"
2444 "jl .Li386_less_signed_true\n\t"
2445 "jne .Li386_less_signed_false\n\t"
2446 "cmpl %eax,(%esp)\n\t"
2447 "jl .Li386_less_signed_true\n\t"
2448 ".Li386_less_signed_false:\n\t"
2450 "jmp .Li386_less_signed_end\n\t"
2451 ".Li386_less_signed_true:\n\t"
2453 ".Li386_less_signed_end:\n\t"
2455 "lea 0x8(%esp),%esp");
2459 i386_emit_less_unsigned (void)
2461 EMIT_ASM32 (i386_less_unsigned
,
2462 "cmpl %ebx,4(%esp)\n\t"
2463 "jb .Li386_less_unsigned_true\n\t"
2464 "jne .Li386_less_unsigned_false\n\t"
2465 "cmpl %eax,(%esp)\n\t"
2466 "jb .Li386_less_unsigned_true\n\t"
2467 ".Li386_less_unsigned_false:\n\t"
2469 "jmp .Li386_less_unsigned_end\n\t"
2470 ".Li386_less_unsigned_true:\n\t"
2472 ".Li386_less_unsigned_end:\n\t"
2474 "lea 0x8(%esp),%esp");
2478 i386_emit_ref (int size
)
2483 EMIT_ASM32 (i386_ref1
,
2487 EMIT_ASM32 (i386_ref2
,
2491 EMIT_ASM32 (i386_ref4
,
2492 "movl (%eax),%eax");
2495 EMIT_ASM32 (i386_ref8
,
2496 "movl 4(%eax),%ebx\n\t"
2497 "movl (%eax),%eax");
2503 i386_emit_if_goto (int *offset_p
, int *size_p
)
2505 EMIT_ASM32 (i386_if_goto
,
2511 /* Don't trust the assembler to choose the right jump */
2512 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2515 *offset_p
= 11; /* be sure that this matches the sequence above */
2521 i386_emit_goto (int *offset_p
, int *size_p
)
2523 EMIT_ASM32 (i386_goto
,
2524 /* Don't trust the assembler to choose the right jump */
2525 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2533 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2535 int diff
= (to
- (from
+ size
));
2536 unsigned char buf
[sizeof (int)];
2538 /* We're only doing 4-byte sizes at the moment. */
2545 memcpy (buf
, &diff
, sizeof (int));
2546 write_inferior_memory (from
, buf
, sizeof (int));
2550 i386_emit_const (LONGEST num
)
2552 unsigned char buf
[16];
2554 CORE_ADDR buildaddr
= current_insn_ptr
;
2557 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2558 lo
= num
& 0xffffffff;
2559 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2561 hi
= ((num
>> 32) & 0xffffffff);
2564 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2565 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2570 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2572 append_insns (&buildaddr
, i
, buf
);
2573 current_insn_ptr
= buildaddr
;
2577 i386_emit_call (CORE_ADDR fn
)
2579 unsigned char buf
[16];
2581 CORE_ADDR buildaddr
;
2583 buildaddr
= current_insn_ptr
;
2585 buf
[i
++] = 0xe8; /* call <reladdr> */
2586 offset
= ((int) fn
) - (buildaddr
+ 5);
2587 memcpy (buf
+ 1, &offset
, 4);
2588 append_insns (&buildaddr
, 5, buf
);
2589 current_insn_ptr
= buildaddr
;
2593 i386_emit_reg (int reg
)
2595 unsigned char buf
[16];
2597 CORE_ADDR buildaddr
;
2599 EMIT_ASM32 (i386_reg_a
,
2601 buildaddr
= current_insn_ptr
;
2603 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2604 memcpy (&buf
[i
], ®
, sizeof (reg
));
2606 append_insns (&buildaddr
, i
, buf
);
2607 current_insn_ptr
= buildaddr
;
2608 EMIT_ASM32 (i386_reg_b
,
2609 "mov %eax,4(%esp)\n\t"
2610 "mov 8(%ebp),%eax\n\t"
2612 i386_emit_call (get_raw_reg_func_addr ());
2613 EMIT_ASM32 (i386_reg_c
,
2615 "lea 0x8(%esp),%esp");
2619 i386_emit_pop (void)
2621 EMIT_ASM32 (i386_pop
,
2627 i386_emit_stack_flush (void)
2629 EMIT_ASM32 (i386_stack_flush
,
2635 i386_emit_zero_ext (int arg
)
2640 EMIT_ASM32 (i386_zero_ext_8
,
2641 "and $0xff,%eax\n\t"
2645 EMIT_ASM32 (i386_zero_ext_16
,
2646 "and $0xffff,%eax\n\t"
2650 EMIT_ASM32 (i386_zero_ext_32
,
2659 i386_emit_swap (void)
2661 EMIT_ASM32 (i386_swap
,
2671 i386_emit_stack_adjust (int n
)
2673 unsigned char buf
[16];
2675 CORE_ADDR buildaddr
= current_insn_ptr
;
2678 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2682 append_insns (&buildaddr
, i
, buf
);
2683 current_insn_ptr
= buildaddr
;
2686 /* FN's prototype is `LONGEST(*fn)(int)'. */
2689 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2691 unsigned char buf
[16];
2693 CORE_ADDR buildaddr
;
2695 EMIT_ASM32 (i386_int_call_1_a
,
2696 /* Reserve a bit of stack space. */
2698 /* Put the one argument on the stack. */
2699 buildaddr
= current_insn_ptr
;
2701 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2704 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2706 append_insns (&buildaddr
, i
, buf
);
2707 current_insn_ptr
= buildaddr
;
2708 i386_emit_call (fn
);
2709 EMIT_ASM32 (i386_int_call_1_c
,
2711 "lea 0x8(%esp),%esp");
2714 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2717 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2719 unsigned char buf
[16];
2721 CORE_ADDR buildaddr
;
2723 EMIT_ASM32 (i386_void_call_2_a
,
2724 /* Preserve %eax only; we don't have to worry about %ebx. */
2726 /* Reserve a bit of stack space for arguments. */
2727 "sub $0x10,%esp\n\t"
2728 /* Copy "top" to the second argument position. (Note that
2729 we can't assume function won't scribble on its
2730 arguments, so don't try to restore from this.) */
2731 "mov %eax,4(%esp)\n\t"
2732 "mov %ebx,8(%esp)");
2733 /* Put the first argument on the stack. */
2734 buildaddr
= current_insn_ptr
;
2736 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2739 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2741 append_insns (&buildaddr
, i
, buf
);
2742 current_insn_ptr
= buildaddr
;
2743 i386_emit_call (fn
);
2744 EMIT_ASM32 (i386_void_call_2_b
,
2745 "lea 0x10(%esp),%esp\n\t"
2746 /* Restore original stack top. */
2752 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2755 /* Check low half first, more likely to be decider */
2756 "cmpl %eax,(%esp)\n\t"
2757 "jne .Leq_fallthru\n\t"
2758 "cmpl %ebx,4(%esp)\n\t"
2759 "jne .Leq_fallthru\n\t"
2760 "lea 0x8(%esp),%esp\n\t"
2763 /* jmp, but don't trust the assembler to choose the right jump */
2764 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2765 ".Leq_fallthru:\n\t"
2766 "lea 0x8(%esp),%esp\n\t"
2777 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2780 /* Check low half first, more likely to be decider */
2781 "cmpl %eax,(%esp)\n\t"
2783 "cmpl %ebx,4(%esp)\n\t"
2784 "je .Lne_fallthru\n\t"
2786 "lea 0x8(%esp),%esp\n\t"
2789 /* jmp, but don't trust the assembler to choose the right jump */
2790 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2791 ".Lne_fallthru:\n\t"
2792 "lea 0x8(%esp),%esp\n\t"
2803 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2806 "cmpl %ebx,4(%esp)\n\t"
2808 "jne .Llt_fallthru\n\t"
2809 "cmpl %eax,(%esp)\n\t"
2810 "jnl .Llt_fallthru\n\t"
2812 "lea 0x8(%esp),%esp\n\t"
2815 /* jmp, but don't trust the assembler to choose the right jump */
2816 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2817 ".Llt_fallthru:\n\t"
2818 "lea 0x8(%esp),%esp\n\t"
2829 i386_emit_le_goto (int *offset_p
, int *size_p
)
2832 "cmpl %ebx,4(%esp)\n\t"
2834 "jne .Lle_fallthru\n\t"
2835 "cmpl %eax,(%esp)\n\t"
2836 "jnle .Lle_fallthru\n\t"
2838 "lea 0x8(%esp),%esp\n\t"
2841 /* jmp, but don't trust the assembler to choose the right jump */
2842 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2843 ".Lle_fallthru:\n\t"
2844 "lea 0x8(%esp),%esp\n\t"
2855 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2858 "cmpl %ebx,4(%esp)\n\t"
2860 "jne .Lgt_fallthru\n\t"
2861 "cmpl %eax,(%esp)\n\t"
2862 "jng .Lgt_fallthru\n\t"
2864 "lea 0x8(%esp),%esp\n\t"
2867 /* jmp, but don't trust the assembler to choose the right jump */
2868 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2869 ".Lgt_fallthru:\n\t"
2870 "lea 0x8(%esp),%esp\n\t"
2881 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2884 "cmpl %ebx,4(%esp)\n\t"
2886 "jne .Lge_fallthru\n\t"
2887 "cmpl %eax,(%esp)\n\t"
2888 "jnge .Lge_fallthru\n\t"
2890 "lea 0x8(%esp),%esp\n\t"
2893 /* jmp, but don't trust the assembler to choose the right jump */
2894 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2895 ".Lge_fallthru:\n\t"
2896 "lea 0x8(%esp),%esp\n\t"
2906 struct emit_ops i386_emit_ops
=
2914 i386_emit_rsh_signed
,
2915 i386_emit_rsh_unsigned
,
2923 i386_emit_less_signed
,
2924 i386_emit_less_unsigned
,
2928 i386_write_goto_address
,
2933 i386_emit_stack_flush
,
2936 i386_emit_stack_adjust
,
2937 i386_emit_int_call_1
,
2938 i386_emit_void_call_2
,
2948 static struct emit_ops
*
2952 int use_64bit
= register_size (0) == 8;
2955 return &amd64_emit_ops
;
2958 return &i386_emit_ops
;
2961 /* This is initialized assuming an amd64 target.
2962 x86_arch_setup will correct it for i386 or amd64 targets. */
2964 struct linux_target_ops the_low_target
=
2981 x86_stopped_by_watchpoint
,
2982 x86_stopped_data_address
,
2983 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2984 native i386 case (no registers smaller than an xfer unit), and are not
2985 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2988 /* need to fix up i386 siginfo if host is amd64 */
2990 x86_linux_new_process
,
2991 x86_linux_new_thread
,
2992 x86_linux_prepare_to_resume
,
2993 x86_linux_process_qsupported
,
2994 x86_supports_tracepoints
,
2995 x86_get_thread_area
,
2996 x86_install_fast_tracepoint_jump_pad
,
2998 x86_get_min_fast_tracepoint_insn_len
,