/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"
/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
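
/* jump_insn is the template for a 5-byte "jmp rel32" (opcode 0xe9
   followed by a 32-bit displacement); small_jump_insn is the 4-byte
   "jmp rel16" form (operand-size prefix 0x66, opcode 0xe9, 16-bit
   displacement).  The displacement bytes are patched in when a fast
   tracepoint jump is installed.  */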
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/ptrace.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
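
/* Each entry above is the byte offset of the corresponding GDB register
   within the native GPR block (`struct user_regs_struct'): the slot
   index times the 8-byte slot size used by the 64-bit layout.  */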
/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
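
/* Entries of -1 mark GDB registers (floating-point, SSE, and so on) that
   are not part of the general-purpose regset; they are transferred
   through the FP/XSTATE regsets instead and are skipped by the
   fill/store routines below.  */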
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid, (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and setting the right one there, rather than having
   to modify the target_regsets global.  */
struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
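
/* 0xCC is the one-byte INT3 instruction; it is both what we plant as a
   memory breakpoint and what x86_breakpoint_at looks for when deciding
   whether a breakpoint is already present at PC.  */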
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved in some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}
/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}
/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}
static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */
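
/* The compat_* types below mirror the 32-bit ABI's layout: pointers
   become 32-bit compat_uptr_t values and clock_t/timer_t/time_t shrink
   to int, so a compat_siginfo_t built from them matches what a 32-bit
   inferior expects to see.  */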
/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct { unsigned int _pid; unsigned int _uid; } _kill;

    /* POSIX.1b timers */
    struct { compat_timer_t _tid; int _overrun;
	     compat_sigval_t _sigval; } _timer;

    /* POSIX.1b signals */
    struct { unsigned int _pid; unsigned int _uid;
	     compat_sigval_t _sigval; } _rt;

    /* SIGCHLD */
    struct { unsigned int _pid; unsigned int _uid; int _status;
	     compat_clock_t _utime; compat_clock_t _stime; } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct { unsigned int _addr; } _sigfault;

    /* SIGPOLL */
    struct { int _band; int _fd; } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct { unsigned int _pid; unsigned int _uid; } _kill;

    /* POSIX.1b timers */
    struct { compat_timer_t _tid; int _overrun;
	     compat_sigval_t _sigval; } _timer;

    /* POSIX.1b signals */
    struct { unsigned int _pid; unsigned int _uid;
	     compat_sigval_t _sigval; } _rt;

    /* SIGCHLD */
    struct { unsigned int _pid; unsigned int _uid; int _status;
	     compat_x32_clock_t _utime; compat_x32_clock_t _stime; } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct { unsigned int _addr; } _sigfault;

    /* SIGPOLL */
    struct { int _band; int _fd; } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
/* Is this process 64-bit?  */
static int linux_is_elf64;
#endif /* __x86_64__ */
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!linux_is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else if (linux_is_elf64)
    init_registers_amd64_linux ();
  else
    init_registers_x32_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		break;

	    if (regset->fill_function != NULL)
	      regset->size = 0;
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else if (linux_is_elf64)
	    init_registers_amd64_avx_linux ();
	  else
	    init_registers_x32_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
  int pid = pid_of (get_thread_lwp (current_inferior));
  unsigned int machine;
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#ifdef __x86_64__
  if (is_elf64 < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (machine == EM_X86_64)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      linux_is_elf64 = is_elf64;
      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
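
/* Helper for building code: parse OP, a string of space-separated hex
   byte values such as "48 89 e6", into BUF and return the number of
   bytes written.  */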
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */
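
/* The pad built below has this layout: push all GPRs, the flags and the
   tracepoint address, reserve stack for a collecting_t object and take
   the collector spin-lock with cmpxchg, call gdb_collect with the
   tracepoint object and the saved-register block as arguments, release
   the lock, restore the registers, execute the relocated copy of the
   original instruction, and finally jump back to the instruction
   following the tracepoint site.  */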
#ifdef __x86_64__

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */
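
/* The 32-bit pad follows the same plan as the amd64 one above, but saves
   registers with pushad, passes arguments to gdb_collect on the stack,
   and, for 4-byte instructions, may route the jump through a separate
   trampoline reached by a 16-bit relative jump.  */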
static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */
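
/* Concretely, EMIT_ASM(NAME, INSNS) assembles INSNS between two local
   labels start_NAME and end_NAME inside gdbserver's own text, jumps
   over them at run time, and then copies the bytes between the labels
   into the inferior with add_insns; the compiled-in bytes themselves
   are never executed by gdbserver.  */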
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8, /* ... */);
      break;
    case 16:
      EMIT_ASM (amd64_ext_16, /* ... */);
      break;
    case 32:
      EMIT_ASM (amd64_ext_32, /* ... */);
      break;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1, /* ... */);
      break;
    case 2:
      EMIT_ASM (amd64_ref2, /* ... */);
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      memcpy (buf + i, &fn, 8);
      buf[i++] = 0xff; /* callq *%r10 */
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      memcpy (buf + i, &offset32, 4);
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop, /* ... */);
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush, /* ... */);
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8, /* ... */);
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t");
      break;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap, /* ... */);
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    /* Also pass top as the second argument.  */
	    /* ... */);
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    /* ... */);
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq_goto,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t");
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne_goto,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t");
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt_goto,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t");
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le_goto,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t");
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt_goto,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t");
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge_goto,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t");
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_write_goto_address,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue, /* ... */);
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "movl %eax,%ebx\n\t");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "movl %eax,%ebx\n\t");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t");
      break;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "test %eax,%eax\n\t");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      ".Li386_equal_end:\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1, /* ... */);
      break;
    case 2:
      EMIT_ASM32 (i386_ref2, /* ... */);
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  *offset_p = 11; /* be sure that this matches the sequence above */
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a, /* ... */);
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop, /* ... */);
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush, /* ... */);
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32, /* ... */);
      break;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap, /* ... */);
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      /* ... */);
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      /* ... */);
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_eq_goto,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t");
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_ne_goto,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t");
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_lt_goto,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t");
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_le_goto,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t");
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_gt_goto,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t");
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_ge_goto,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t");
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_write_goto_address,
    i386_emit_stack_flush,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  NULL, /* fetch_register */
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};