1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
34 /* Defined in auto-generated file i386-linux.c. */
35 void init_registers_i386_linux (void);
36 /* Defined in auto-generated file amd64-linux.c. */
37 void init_registers_amd64_linux (void);
38 /* Defined in auto-generated file i386-avx-linux.c. */
39 void init_registers_i386_avx_linux (void);
40 /* Defined in auto-generated file amd64-avx-linux.c. */
41 void init_registers_amd64_avx_linux (void);
42 /* Defined in auto-generated file i386-mmx-linux.c. */
43 void init_registers_i386_mmx_linux (void);
44 /* Defined in auto-generated file x32-linux.c. */
45 void init_registers_x32_linux (void);
46 /* Defined in auto-generated file x32-avx-linux.c. */
47 void init_registers_x32_avx_linux (void);
49 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
50 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
52 /* Backward compatibility for gdb without XML support. */
54 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
55 <architecture>i386</architecture>\
56 <osabi>GNU/Linux</osabi>\
60 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
61 <architecture>i386:x86-64</architecture>\
62 <osabi>GNU/Linux</osabi>\
67 #include <sys/procfs.h>
68 #include <sys/ptrace.h>
71 #ifndef PTRACE_GETREGSET
72 #define PTRACE_GETREGSET 0x4204
75 #ifndef PTRACE_SETREGSET
76 #define PTRACE_SETREGSET 0x4205
80 #ifndef PTRACE_GET_THREAD_AREA
81 #define PTRACE_GET_THREAD_AREA 25
84 /* This definition comes from prctl.h, but some kernels may not have it. */
85 #ifndef PTRACE_ARCH_PRCTL
86 #define PTRACE_ARCH_PRCTL 30
89 /* The following definitions come from prctl.h, but may be absent
90 for certain configurations. */
92 #define ARCH_SET_GS 0x1001
93 #define ARCH_SET_FS 0x1002
94 #define ARCH_GET_FS 0x1003
95 #define ARCH_GET_GS 0x1004
98 /* Per-process arch-specific data we want to keep. */
100 struct arch_process_info
102 struct i386_debug_reg_state debug_reg_state
;
105 /* Per-thread arch-specific data we want to keep. */
109 /* Non-zero if our copy differs from what's recorded in the thread. */
110 int debug_registers_changed
;
115 /* Mapping between the general-purpose registers in `struct user'
116 format and GDB's register array layout.
117 Note that the transfer layout uses 64-bit regs. */
118 static /*const*/ int i386_regmap
[] =
120 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
121 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
122 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
123 DS
* 8, ES
* 8, FS
* 8, GS
* 8
126 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
128 /* So code below doesn't have to care, i386 or amd64. */
129 #define ORIG_EAX ORIG_RAX
131 static const int x86_64_regmap
[] =
133 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
134 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
135 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
136 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
137 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
138 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
139 -1, -1, -1, -1, -1, -1, -1, -1,
140 -1, -1, -1, -1, -1, -1, -1, -1,
141 -1, -1, -1, -1, -1, -1, -1, -1,
142 -1, -1, -1, -1, -1, -1, -1, -1, -1,
146 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
148 #else /* ! __x86_64__ */
150 /* Mapping between the general-purpose registers in `struct user'
151 format and GDB's register array layout. */
152 static /*const*/ int i386_regmap
[] =
154 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
155 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
156 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
157 DS
* 4, ES
* 4, FS
* 4, GS
* 4
160 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
164 /* Called by libthread_db. */
167 ps_get_thread_area (const struct ps_prochandle
*ph
,
168 lwpid_t lwpid
, int idx
, void **base
)
171 int use_64bit
= register_size (0) == 8;
178 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
182 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
193 unsigned int desc
[4];
195 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
196 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
199 *(int *)base
= desc
[1];
204 /* Get the thread area address. This is used to recognize which
205 thread is which when tracing with the in-process agent library. We
206 don't read anything from the address, and treat it as opaque; it's
207 the address itself that we assume is unique per-thread. */
210 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
213 int use_64bit
= register_size (0) == 8;
218 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
220 *addr
= (CORE_ADDR
) (uintptr_t) base
;
229 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
230 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
231 unsigned int desc
[4];
233 const int reg_thread_area
= 3; /* bits to scale down register value. */
236 collect_register_by_name (regcache
, "gs", &gs
);
238 idx
= gs
>> reg_thread_area
;
240 if (ptrace (PTRACE_GET_THREAD_AREA
,
242 (void *) (long) idx
, (unsigned long) &desc
) < 0)
253 i386_cannot_store_register (int regno
)
255 return regno
>= I386_NUM_REGS
;
259 i386_cannot_fetch_register (int regno
)
261 return regno
>= I386_NUM_REGS
;
265 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
270 if (register_size (0) == 8)
272 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
273 if (x86_64_regmap
[i
] != -1)
274 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
279 for (i
= 0; i
< I386_NUM_REGS
; i
++)
280 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
282 collect_register_by_name (regcache
, "orig_eax",
283 ((char *) buf
) + ORIG_EAX
* 4);
287 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
292 if (register_size (0) == 8)
294 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
295 if (x86_64_regmap
[i
] != -1)
296 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
301 for (i
= 0; i
< I386_NUM_REGS
; i
++)
302 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
304 supply_register_by_name (regcache
, "orig_eax",
305 ((char *) buf
) + ORIG_EAX
* 4);
/* Convert REGCACHE's FP register contents into the ptrace FP-regset
   layout in BUF (fxsave layout on amd64, fsave layout on i386).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Convert the ptrace FP-regset contents in BUF into REGCACHE
   (fxsave layout on amd64, fsave layout on i386).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Convert REGCACHE's FP/SSE registers into the fxsave-layout
   buffer BUF (i386 PTRACE_SETFPXREGS path).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Convert the fxsave-layout buffer BUF into REGCACHE's FP/SSE
   registers (i386 PTRACE_GETFPXREGS path).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Convert REGCACHE's extended-state registers into the xsave-layout
   buffer BUF (PTRACE_SETREGSET/NT_X86_XSTATE path).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Convert the xsave-layout buffer BUF into REGCACHE's extended-state
   registers (PTRACE_GETREGSET/NT_X86_XSTATE path).  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
356 /* ??? The non-biarch i386 case stores all the i387 regs twice.
357 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
358 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
359 doesn't work. IWBN to avoid the duplication in the case where it
360 does work. Maybe the arch_setup routine could check whether it works
361 and update target_regsets accordingly, maybe by moving target_regsets
362 to linux_target_ops and set the right one there, rather than having to
363 modify the target_regsets global. */
365 struct regset_info target_regsets
[] =
367 #ifdef HAVE_PTRACE_GETREGS
368 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
370 x86_fill_gregset
, x86_store_gregset
},
371 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
372 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
374 # ifdef HAVE_PTRACE_GETFPXREGS
375 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
377 x86_fill_fpxregset
, x86_store_fpxregset
},
380 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
382 x86_fill_fpregset
, x86_store_fpregset
},
383 #endif /* HAVE_PTRACE_GETREGS */
384 { 0, 0, 0, -1, -1, NULL
, NULL
}
388 x86_get_pc (struct regcache
*regcache
)
390 int use_64bit
= register_size (0) == 8;
395 collect_register_by_name (regcache
, "rip", &pc
);
396 return (CORE_ADDR
) pc
;
401 collect_register_by_name (regcache
, "eip", &pc
);
402 return (CORE_ADDR
) pc
;
407 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
409 int use_64bit
= register_size (0) == 8;
413 unsigned long newpc
= pc
;
414 supply_register_by_name (regcache
, "rip", &newpc
);
418 unsigned int newpc
= pc
;
419 supply_register_by_name (regcache
, "eip", &newpc
);
423 static const unsigned char x86_breakpoint
[] = { 0xCC };
424 #define x86_breakpoint_len 1
427 x86_breakpoint_at (CORE_ADDR pc
)
431 (*the_target
->read_memory
) (pc
, &c
, 1);
438 /* Support for debug registers. */
441 x86_linux_dr_get (ptid_t ptid
, int regnum
)
446 tid
= ptid_get_lwp (ptid
);
449 value
= ptrace (PTRACE_PEEKUSER
, tid
,
450 offsetof (struct user
, u_debugreg
[regnum
]), 0);
452 error ("Couldn't read debug register");
458 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
462 tid
= ptid_get_lwp (ptid
);
465 ptrace (PTRACE_POKEUSER
, tid
,
466 offsetof (struct user
, u_debugreg
[regnum
]), value
);
468 error ("Couldn't write debug register");
472 update_debug_registers_callback (struct inferior_list_entry
*entry
,
475 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
476 int pid
= *(int *) pid_p
;
478 /* Only update the threads of this process. */
479 if (pid_of (lwp
) == pid
)
481 /* The actual update is done later just before resuming the lwp,
482 we just mark that the registers need updating. */
483 lwp
->arch_private
->debug_registers_changed
= 1;
485 /* If the lwp isn't stopped, force it to momentarily pause, so
486 we can update its debug registers. */
488 linux_stop_lwp (lwp
);
494 /* Update the inferior's debug register REGNUM from STATE. */
497 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
499 /* Only update the threads of this process. */
500 int pid
= pid_of (get_thread_lwp (current_inferior
));
502 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
503 fatal ("Invalid debug register %d", regnum
);
505 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
508 /* Return the inferior's debug register REGNUM. */
511 i386_dr_low_get_addr (int regnum
)
513 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
514 ptid_t ptid
= ptid_of (lwp
);
516 /* DR6 and DR7 are retrieved with some other way. */
517 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
519 return x86_linux_dr_get (ptid
, regnum
);
522 /* Update the inferior's DR7 debug control register from STATE. */
525 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
527 /* Only update the threads of this process. */
528 int pid
= pid_of (get_thread_lwp (current_inferior
));
530 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
533 /* Return the inferior's DR7 debug control register. */
536 i386_dr_low_get_control (void)
538 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
539 ptid_t ptid
= ptid_of (lwp
);
541 return x86_linux_dr_get (ptid
, DR_CONTROL
);
544 /* Get the value of the DR6 debug status register from the inferior
545 and record it in STATE. */
548 i386_dr_low_get_status (void)
550 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
551 ptid_t ptid
= ptid_of (lwp
);
553 return x86_linux_dr_get (ptid
, DR_STATUS
);
556 /* Breakpoint/Watchpoint support. */
559 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
561 struct process_info
*proc
= current_process ();
564 case '0': /* software-breakpoint */
568 ret
= prepare_to_access_memory ();
571 ret
= set_gdb_breakpoint_at (addr
);
572 done_accessing_memory ();
575 case '1': /* hardware-breakpoint */
576 case '2': /* write watchpoint */
577 case '3': /* read watchpoint */
578 case '4': /* access watchpoint */
579 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
589 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
591 struct process_info
*proc
= current_process ();
594 case '0': /* software-breakpoint */
598 ret
= prepare_to_access_memory ();
601 ret
= delete_gdb_breakpoint_at (addr
);
602 done_accessing_memory ();
605 case '1': /* hardware-breakpoint */
606 case '2': /* write watchpoint */
607 case '3': /* read watchpoint */
608 case '4': /* access watchpoint */
609 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
618 x86_stopped_by_watchpoint (void)
620 struct process_info
*proc
= current_process ();
621 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
625 x86_stopped_data_address (void)
627 struct process_info
*proc
= current_process ();
629 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
635 /* Called when a new process is created. */
637 static struct arch_process_info
*
638 x86_linux_new_process (void)
640 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
642 i386_low_init_dregs (&info
->debug_reg_state
);
647 /* Called when a new thread is detected. */
649 static struct arch_lwp_info
*
650 x86_linux_new_thread (void)
652 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
654 info
->debug_registers_changed
= 1;
659 /* Called when resuming a thread.
660 If the debug regs have changed, update the thread's copies. */
663 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
665 ptid_t ptid
= ptid_of (lwp
);
666 int clear_status
= 0;
668 if (lwp
->arch_private
->debug_registers_changed
)
671 int pid
= ptid_get_pid (ptid
);
672 struct process_info
*proc
= find_process_pid (pid
);
673 struct i386_debug_reg_state
*state
674 = &proc
->private->arch_private
->debug_reg_state
;
676 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
677 if (state
->dr_ref_count
[i
] > 0)
679 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
681 /* If we're setting a watchpoint, any change the inferior
682 had done itself to the debug registers needs to be
683 discarded, otherwise, i386_low_stopped_data_address can
688 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
690 lwp
->arch_private
->debug_registers_changed
= 0;
693 if (clear_status
|| lwp
->stopped_by_watchpoint
)
694 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
697 /* When GDBSERVER is built as a 64-bit application on linux, the
698 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
699 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
700 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
701 conversion in-place ourselves. */
703 /* These types below (compat_*) define a siginfo type that is layout
704 compatible with the siginfo type exported by the 32-bit userspace
709 typedef int compat_int_t
;
710 typedef unsigned int compat_uptr_t
;
712 typedef int compat_time_t
;
713 typedef int compat_timer_t
;
714 typedef int compat_clock_t
;
716 struct compat_timeval
718 compat_time_t tv_sec
;
722 typedef union compat_sigval
724 compat_int_t sival_int
;
725 compat_uptr_t sival_ptr
;
728 typedef struct compat_siginfo
736 int _pad
[((128 / sizeof (int)) - 3)];
745 /* POSIX.1b timers */
750 compat_sigval_t _sigval
;
753 /* POSIX.1b signals */
758 compat_sigval_t _sigval
;
767 compat_clock_t _utime
;
768 compat_clock_t _stime
;
771 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
786 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
787 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
789 typedef struct compat_x32_siginfo
797 int _pad
[((128 / sizeof (int)) - 3)];
806 /* POSIX.1b timers */
811 compat_sigval_t _sigval
;
814 /* POSIX.1b signals */
819 compat_sigval_t _sigval
;
828 compat_x32_clock_t _utime
;
829 compat_x32_clock_t _stime
;
832 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
845 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
847 #define cpt_si_pid _sifields._kill._pid
848 #define cpt_si_uid _sifields._kill._uid
849 #define cpt_si_timerid _sifields._timer._tid
850 #define cpt_si_overrun _sifields._timer._overrun
851 #define cpt_si_status _sifields._sigchld._status
852 #define cpt_si_utime _sifields._sigchld._utime
853 #define cpt_si_stime _sifields._sigchld._stime
854 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
855 #define cpt_si_addr _sifields._sigfault._addr
856 #define cpt_si_band _sifields._sigpoll._band
857 #define cpt_si_fd _sifields._sigpoll._fd
859 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
860 In their place is si_timer1,si_timer2. */
862 #define si_timerid si_timer1
865 #define si_overrun si_timer2
869 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
871 memset (to
, 0, sizeof (*to
));
873 to
->si_signo
= from
->si_signo
;
874 to
->si_errno
= from
->si_errno
;
875 to
->si_code
= from
->si_code
;
877 if (to
->si_code
== SI_TIMER
)
879 to
->cpt_si_timerid
= from
->si_timerid
;
880 to
->cpt_si_overrun
= from
->si_overrun
;
881 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
883 else if (to
->si_code
== SI_USER
)
885 to
->cpt_si_pid
= from
->si_pid
;
886 to
->cpt_si_uid
= from
->si_uid
;
888 else if (to
->si_code
< 0)
890 to
->cpt_si_pid
= from
->si_pid
;
891 to
->cpt_si_uid
= from
->si_uid
;
892 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
896 switch (to
->si_signo
)
899 to
->cpt_si_pid
= from
->si_pid
;
900 to
->cpt_si_uid
= from
->si_uid
;
901 to
->cpt_si_status
= from
->si_status
;
902 to
->cpt_si_utime
= from
->si_utime
;
903 to
->cpt_si_stime
= from
->si_stime
;
909 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
912 to
->cpt_si_band
= from
->si_band
;
913 to
->cpt_si_fd
= from
->si_fd
;
916 to
->cpt_si_pid
= from
->si_pid
;
917 to
->cpt_si_uid
= from
->si_uid
;
918 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
925 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
927 memset (to
, 0, sizeof (*to
));
929 to
->si_signo
= from
->si_signo
;
930 to
->si_errno
= from
->si_errno
;
931 to
->si_code
= from
->si_code
;
933 if (to
->si_code
== SI_TIMER
)
935 to
->si_timerid
= from
->cpt_si_timerid
;
936 to
->si_overrun
= from
->cpt_si_overrun
;
937 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
939 else if (to
->si_code
== SI_USER
)
941 to
->si_pid
= from
->cpt_si_pid
;
942 to
->si_uid
= from
->cpt_si_uid
;
944 else if (to
->si_code
< 0)
946 to
->si_pid
= from
->cpt_si_pid
;
947 to
->si_uid
= from
->cpt_si_uid
;
948 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
952 switch (to
->si_signo
)
955 to
->si_pid
= from
->cpt_si_pid
;
956 to
->si_uid
= from
->cpt_si_uid
;
957 to
->si_status
= from
->cpt_si_status
;
958 to
->si_utime
= from
->cpt_si_utime
;
959 to
->si_stime
= from
->cpt_si_stime
;
965 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
968 to
->si_band
= from
->cpt_si_band
;
969 to
->si_fd
= from
->cpt_si_fd
;
972 to
->si_pid
= from
->cpt_si_pid
;
973 to
->si_uid
= from
->cpt_si_uid
;
974 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
981 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
984 memset (to
, 0, sizeof (*to
));
986 to
->si_signo
= from
->si_signo
;
987 to
->si_errno
= from
->si_errno
;
988 to
->si_code
= from
->si_code
;
990 if (to
->si_code
== SI_TIMER
)
992 to
->cpt_si_timerid
= from
->si_timerid
;
993 to
->cpt_si_overrun
= from
->si_overrun
;
994 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
996 else if (to
->si_code
== SI_USER
)
998 to
->cpt_si_pid
= from
->si_pid
;
999 to
->cpt_si_uid
= from
->si_uid
;
1001 else if (to
->si_code
< 0)
1003 to
->cpt_si_pid
= from
->si_pid
;
1004 to
->cpt_si_uid
= from
->si_uid
;
1005 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1009 switch (to
->si_signo
)
1012 to
->cpt_si_pid
= from
->si_pid
;
1013 to
->cpt_si_uid
= from
->si_uid
;
1014 to
->cpt_si_status
= from
->si_status
;
1015 to
->cpt_si_utime
= from
->si_utime
;
1016 to
->cpt_si_stime
= from
->si_stime
;
1022 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1025 to
->cpt_si_band
= from
->si_band
;
1026 to
->cpt_si_fd
= from
->si_fd
;
1029 to
->cpt_si_pid
= from
->si_pid
;
1030 to
->cpt_si_uid
= from
->si_uid
;
1031 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1038 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1039 compat_x32_siginfo_t
*from
)
1041 memset (to
, 0, sizeof (*to
));
1043 to
->si_signo
= from
->si_signo
;
1044 to
->si_errno
= from
->si_errno
;
1045 to
->si_code
= from
->si_code
;
1047 if (to
->si_code
== SI_TIMER
)
1049 to
->si_timerid
= from
->cpt_si_timerid
;
1050 to
->si_overrun
= from
->cpt_si_overrun
;
1051 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1053 else if (to
->si_code
== SI_USER
)
1055 to
->si_pid
= from
->cpt_si_pid
;
1056 to
->si_uid
= from
->cpt_si_uid
;
1058 else if (to
->si_code
< 0)
1060 to
->si_pid
= from
->cpt_si_pid
;
1061 to
->si_uid
= from
->cpt_si_uid
;
1062 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1066 switch (to
->si_signo
)
1069 to
->si_pid
= from
->cpt_si_pid
;
1070 to
->si_uid
= from
->cpt_si_uid
;
1071 to
->si_status
= from
->cpt_si_status
;
1072 to
->si_utime
= from
->cpt_si_utime
;
1073 to
->si_stime
= from
->cpt_si_stime
;
1079 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1082 to
->si_band
= from
->cpt_si_band
;
1083 to
->si_fd
= from
->cpt_si_fd
;
1086 to
->si_pid
= from
->cpt_si_pid
;
1087 to
->si_uid
= from
->cpt_si_uid
;
1088 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1094 /* Is this process 64-bit? */
1095 static int linux_is_elf64
;
1096 #endif /* __x86_64__ */
1098 /* Convert a native/host siginfo object, into/from the siginfo in the
1099 layout of the inferiors' architecture. Returns true if any
1100 conversion was done; false otherwise. If DIRECTION is 1, then copy
1101 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1105 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1108 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1109 if (register_size (0) == 4)
1111 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1112 fatal ("unexpected difference in siginfo");
1115 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1117 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1121 /* No fixup for native x32 GDB. */
1122 else if (!linux_is_elf64
&& sizeof (void *) == 8)
1124 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1125 fatal ("unexpected difference in siginfo");
1128 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1131 siginfo_from_compat_x32_siginfo (native
,
1132 (struct compat_x32_siginfo
*) inf
);
1143 /* Update gdbserver_xmltarget. */
1146 x86_linux_update_xmltarget (void)
1149 struct regset_info
*regset
;
1150 static unsigned long long xcr0
;
1151 static int have_ptrace_getregset
= -1;
1152 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
1153 static int have_ptrace_getfpxregs
= -1;
1156 if (!current_inferior
)
1159 /* Before changing the register cache internal layout or the target
1160 regsets, flush the contents of the current valid caches back to
1162 regcache_invalidate ();
1164 pid
= pid_of (get_thread_lwp (current_inferior
));
1166 if (num_xmm_registers
== 8)
1167 init_registers_i386_linux ();
1168 else if (linux_is_elf64
)
1169 init_registers_amd64_linux ();
1171 init_registers_x32_linux ();
1174 # ifdef HAVE_PTRACE_GETFPXREGS
1175 if (have_ptrace_getfpxregs
== -1)
1177 elf_fpxregset_t fpxregs
;
1179 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
1181 have_ptrace_getfpxregs
= 0;
1182 x86_xcr0
= I386_XSTATE_X87_MASK
;
1184 /* Disable PTRACE_GETFPXREGS. */
1185 for (regset
= target_regsets
;
1186 regset
->fill_function
!= NULL
; regset
++)
1187 if (regset
->get_request
== PTRACE_GETFPXREGS
)
1194 have_ptrace_getfpxregs
= 1;
1197 if (!have_ptrace_getfpxregs
)
1199 init_registers_i386_mmx_linux ();
1203 init_registers_i386_linux ();
1209 /* Don't use XML. */
1211 if (num_xmm_registers
== 8)
1212 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1214 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1216 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1219 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1224 /* Check if XSAVE extended state is supported. */
1225 if (have_ptrace_getregset
== -1)
1227 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1230 iov
.iov_base
= xstateregs
;
1231 iov
.iov_len
= sizeof (xstateregs
);
1233 /* Check if PTRACE_GETREGSET works. */
1234 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1237 have_ptrace_getregset
= 0;
1241 have_ptrace_getregset
= 1;
1243 /* Get XCR0 from XSAVE extended state at byte 464. */
1244 xcr0
= xstateregs
[464 / sizeof (long long)];
1246 /* Use PTRACE_GETREGSET if it is available. */
1247 for (regset
= target_regsets
;
1248 regset
->fill_function
!= NULL
; regset
++)
1249 if (regset
->get_request
== PTRACE_GETREGSET
)
1250 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1251 else if (regset
->type
!= GENERAL_REGS
)
1255 if (have_ptrace_getregset
)
1257 /* AVX is the highest feature we support. */
1258 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1263 /* I386 has 8 xmm regs. */
1264 if (num_xmm_registers
== 8)
1265 init_registers_i386_avx_linux ();
1266 else if (linux_is_elf64
)
1267 init_registers_amd64_avx_linux ();
1269 init_registers_x32_avx_linux ();
1271 init_registers_i386_avx_linux ();
1277 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1278 PTRACE_GETREGSET. */
1281 x86_linux_process_qsupported (const char *query
)
1283 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1284 with "i386" in qSupported query, it supports x86 XML target
1287 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1289 char *copy
= xstrdup (query
+ 13);
1292 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1294 if (strcmp (p
, "i386") == 0)
1304 x86_linux_update_xmltarget ();
1307 /* Initialize gdbserver for the architecture of the inferior. */
1310 x86_arch_setup (void)
1312 int pid
= pid_of (get_thread_lwp (current_inferior
));
1313 unsigned int machine
;
1314 int is_elf64
= linux_pid_exe_is_elf_64_file (pid
, &machine
);
1316 if (sizeof (void *) == 4)
1319 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1321 else if (machine
== EM_X86_64
)
1322 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1329 /* This can only happen if /proc/<pid>/exe is unreadable,
1330 but "that can't happen" if we've gotten this far.
1331 Fall through and assume this is a 32-bit program. */
1333 else if (machine
== EM_X86_64
)
1335 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1336 the_low_target
.num_regs
= -1;
1337 the_low_target
.regmap
= NULL
;
1338 the_low_target
.cannot_fetch_register
= NULL
;
1339 the_low_target
.cannot_store_register
= NULL
;
1341 /* Amd64 has 16 xmm regs. */
1342 num_xmm_registers
= 16;
1344 linux_is_elf64
= is_elf64
;
1345 x86_linux_update_xmltarget ();
1352 /* Ok we have a 32-bit inferior. */
1354 the_low_target
.num_regs
= I386_NUM_REGS
;
1355 the_low_target
.regmap
= i386_regmap
;
1356 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1357 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1359 /* I386 has 8 xmm regs. */
1360 num_xmm_registers
= 8;
1362 x86_linux_update_xmltarget ();
1366 x86_supports_tracepoints (void)
1372 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1374 write_inferior_memory (*to
, buf
, len
);
1379 push_opcode (unsigned char *buf
, char *op
)
1381 unsigned char *buf_org
= buf
;
1386 unsigned long ul
= strtoul (op
, &endptr
, 16);
1395 return buf
- buf_org
;
1400 /* Build a jump pad that saves registers and calls a collection
1401 function. Writes a jump instruction to the jump pad to
1402 JJUMPAD_INSN. The caller is responsible to write it in at the
1403 tracepoint address. */
1406 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1407 CORE_ADDR collector
,
1410 CORE_ADDR
*jump_entry
,
1411 CORE_ADDR
*trampoline
,
1412 ULONGEST
*trampoline_size
,
1413 unsigned char *jjump_pad_insn
,
1414 ULONGEST
*jjump_pad_insn_size
,
1415 CORE_ADDR
*adjusted_insn_addr
,
1416 CORE_ADDR
*adjusted_insn_addr_end
,
1419 unsigned char buf
[40];
1423 CORE_ADDR buildaddr
= *jump_entry
;
1425 /* Build the jump pad. */
1427 /* First, do tracepoint data collection. Save registers. */
1429 /* Need to ensure stack pointer saved first. */
1430 buf
[i
++] = 0x54; /* push %rsp */
1431 buf
[i
++] = 0x55; /* push %rbp */
1432 buf
[i
++] = 0x57; /* push %rdi */
1433 buf
[i
++] = 0x56; /* push %rsi */
1434 buf
[i
++] = 0x52; /* push %rdx */
1435 buf
[i
++] = 0x51; /* push %rcx */
1436 buf
[i
++] = 0x53; /* push %rbx */
1437 buf
[i
++] = 0x50; /* push %rax */
1438 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1439 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1440 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1441 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1442 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1443 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1444 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1445 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1446 buf
[i
++] = 0x9c; /* pushfq */
1447 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1449 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1450 i
+= sizeof (unsigned long);
1451 buf
[i
++] = 0x57; /* push %rdi */
1452 append_insns (&buildaddr
, i
, buf
);
1454 /* Stack space for the collecting_t object. */
1456 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1457 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1458 memcpy (buf
+ i
, &tpoint
, 8);
1460 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1461 i
+= push_opcode (&buf
[i
],
1462 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1463 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1464 append_insns (&buildaddr
, i
, buf
);
1468 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1469 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1471 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1472 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1473 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1474 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1475 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1476 append_insns (&buildaddr
, i
, buf
);
1478 /* Set up the gdb_collect call. */
1479 /* At this point, (stack pointer + 0x18) is the base of our saved
1483 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1484 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1486 /* tpoint address may be 64-bit wide. */
1487 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1488 memcpy (buf
+ i
, &tpoint
, 8);
1490 append_insns (&buildaddr
, i
, buf
);
1492 /* The collector function being in the shared library, may be
1493 >31-bits away off the jump pad. */
1495 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1496 memcpy (buf
+ i
, &collector
, 8);
1498 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1499 append_insns (&buildaddr
, i
, buf
);
1501 /* Clear the spin-lock. */
1503 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1504 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1505 memcpy (buf
+ i
, &lockaddr
, 8);
1507 append_insns (&buildaddr
, i
, buf
);
1509 /* Remove stack that had been used for the collect_t object. */
1511 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1512 append_insns (&buildaddr
, i
, buf
);
1514 /* Restore register state. */
1516 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1520 buf
[i
++] = 0x9d; /* popfq */
1521 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1522 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1523 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1524 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1525 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1526 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1527 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1528 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1529 buf
[i
++] = 0x58; /* pop %rax */
1530 buf
[i
++] = 0x5b; /* pop %rbx */
1531 buf
[i
++] = 0x59; /* pop %rcx */
1532 buf
[i
++] = 0x5a; /* pop %rdx */
1533 buf
[i
++] = 0x5e; /* pop %rsi */
1534 buf
[i
++] = 0x5f; /* pop %rdi */
1535 buf
[i
++] = 0x5d; /* pop %rbp */
1536 buf
[i
++] = 0x5c; /* pop %rsp */
1537 append_insns (&buildaddr
, i
, buf
);
1539 /* Now, adjust the original instruction to execute in the jump
1541 *adjusted_insn_addr
= buildaddr
;
1542 relocate_instruction (&buildaddr
, tpaddr
);
1543 *adjusted_insn_addr_end
= buildaddr
;
1545 /* Finally, write a jump back to the program. */
1547 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1548 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1551 "E.Jump back from jump pad too far from tracepoint "
1552 "(offset 0x%" PRIx64
" > int32).", loffset
);
1556 offset
= (int) loffset
;
1557 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1558 memcpy (buf
+ 1, &offset
, 4);
1559 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1561 /* The jump pad is now built. Wire in a jump to our jump pad. This
1562 is always done last (by our caller actually), so that we can
1563 install fast tracepoints with threads running. This relies on
1564 the agent's atomic write support. */
1565 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1566 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1569 "E.Jump pad too far from tracepoint "
1570 "(offset 0x%" PRIx64
" > int32).", loffset
);
1574 offset
= (int) loffset
;
1576 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1577 memcpy (buf
+ 1, &offset
, 4);
1578 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1579 *jjump_pad_insn_size
= sizeof (jump_insn
);
1581 /* Return the end address of our pad. */
1582 *jump_entry
= buildaddr
;
1587 #endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the address of the tracepoint object in the IPA; TPADDR
   is the address of the tracepoint in the inferior's text.  On
   success returns 0 with *JUMP_ENTRY advanced past the pad; on
   failure returns 1 with an "E." message copied into ERR.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  /* NOTE(review): %fs/%gs pushes/pops are two-byte 0x0f-prefixed
     opcodes; second bytes reconstructed from the x86 encoding.  */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");	/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");	/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");	/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");	/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");	/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");	/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  The reverse of the save sequence above;
     %cs and the pushed $pc are discarded with esp adjustments rather
     than popped.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Dispatch to the architecture-specific jump pad builder: a register
   size of 8 bytes means a 64-bit (amd64/x32) inferior, otherwise
   i386.  All arguments are forwarded unchanged.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   depending on the used register size: 5 for amd64 or when no trampoline
   buffer is available, 4 when a trampoline can be used, 0 when the IPA is
   not loaded yet and the answer is unknown.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
/* Append LEN bytes of instructions from START at the current compile
   location, advancing current_insn_ptr past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1875 /* Our general strategy for emitting code is to avoid specifying raw
1876 bytes whenever possible, and instead copy a block of inline asm
1877 that is embedded in the function. This is a little messy, because
1878 we need to keep the compiler from discarding what looks like dead
1879 code, plus suppress various warnings. */
1881 #define EMIT_ASM(NAME, INSNS) \
1884 extern unsigned char start_ ## NAME, end_ ## NAME; \
1885 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1886 __asm__ ("jmp end_" #NAME "\n" \
1887 "\t" "start_" #NAME ":" \
1889 "\t" "end_" #NAME ":"); \
1894 #define EMIT_ASM32(NAME,INSNS) \
1897 extern unsigned char start_ ## NAME, end_ ## NAME; \
1898 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1899 __asm__ (".code32\n" \
1900 "\t" "jmp end_" #NAME "\n" \
1901 "\t" "start_" #NAME ":\n" \
1903 "\t" "end_" #NAME ":\n" \
1909 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
/* Emit the standard frame setup for compiled agent expressions:
   save the frame pointer, reserve scratch space, and stash the two
   incoming arguments (raw regs pointer in %rdi, value pointer in
   %rsi) in the frame.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
/* Emit the function epilogue: store the top-of-stack value (%rax)
   through the saved value pointer, then tear down the frame and
   return.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}
/* Emit code to add the next-to-top stack entry into %rax (the top of
   the expression stack) and pop it.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1947 amd64_emit_sub (void)
1949 EMIT_ASM (amd64_sub
,
1950 "sub %rax,(%rsp)\n\t"
1955 amd64_emit_mul (void)
1961 amd64_emit_lsh (void)
1967 amd64_emit_rsh_signed (void)
1973 amd64_emit_rsh_unsigned (void)
1979 amd64_emit_ext (int arg
)
1984 EMIT_ASM (amd64_ext_8
,
1990 EMIT_ASM (amd64_ext_16
,
1995 EMIT_ASM (amd64_ext_32
,
2004 amd64_emit_log_not (void)
2006 EMIT_ASM (amd64_log_not
,
2007 "test %rax,%rax\n\t"
/* Emit code for bitwise AND of the two top stack entries, popping
   one.  Result is left in %rax.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2021 amd64_emit_bit_or (void)
2024 "or (%rsp),%rax\n\t"
2025 "lea 0x8(%rsp),%rsp");
2029 amd64_emit_bit_xor (void)
2031 EMIT_ASM (amd64_xor
,
2032 "xor (%rsp),%rax\n\t"
2033 "lea 0x8(%rsp),%rsp");
/* Emit code for bitwise NOT of the top stack entry (in %rax),
   implemented as XOR with all-ones.  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
/* Emit code comparing the two top stack entries for equality,
   leaving 1 in %rax if equal, 0 otherwise, and popping one entry.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2058 amd64_emit_less_signed (void)
2060 EMIT_ASM (amd64_less_signed
,
2061 "cmp %rax,(%rsp)\n\t"
2062 "jl .Lamd64_less_signed_true\n\t"
2064 "jmp .Lamd64_less_signed_end\n\t"
2065 ".Lamd64_less_signed_true:\n\t"
2067 ".Lamd64_less_signed_end:\n\t"
2068 "lea 0x8(%rsp),%rsp");
2072 amd64_emit_less_unsigned (void)
2074 EMIT_ASM (amd64_less_unsigned
,
2075 "cmp %rax,(%rsp)\n\t"
2076 "jb .Lamd64_less_unsigned_true\n\t"
2078 "jmp .Lamd64_less_unsigned_end\n\t"
2079 ".Lamd64_less_unsigned_true:\n\t"
2081 ".Lamd64_less_unsigned_end:\n\t"
2082 "lea 0x8(%rsp),%rsp");
2086 amd64_emit_ref (int size
)
2091 EMIT_ASM (amd64_ref1
,
2095 EMIT_ASM (amd64_ref2
,
2099 EMIT_ASM (amd64_ref4
,
2100 "movl (%rax),%eax");
2103 EMIT_ASM (amd64_ref8
,
2104 "movq (%rax),%rax");
2110 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2112 EMIT_ASM (amd64_if_goto
,
2116 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2124 amd64_emit_goto (int *offset_p
, int *size_p
)
2126 EMIT_ASM (amd64_goto
,
2127 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch a previously-emitted jump at FROM (of instruction size SIZE)
   so that it targets TO, by writing the 4-byte relative displacement
   into the inferior.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit code to load the 64-bit constant NUM into %rax (the top of
   the expression stack).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2166 amd64_emit_call (CORE_ADDR fn
)
2168 unsigned char buf
[16];
2170 CORE_ADDR buildaddr
;
2173 /* The destination function being in the shared library, may be
2174 >31-bits away off the compiled code pad. */
2176 buildaddr
= current_insn_ptr
;
2178 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2182 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2184 /* Offset is too large for a call. Use callq, but that requires
2185 a register, so avoid it if possible. Use r10, since it is
2186 call-clobbered, we don't have to push/pop it. */
2187 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2189 memcpy (buf
+ i
, &fn
, 8);
2191 buf
[i
++] = 0xff; /* callq *%r10 */
2196 int offset32
= offset64
; /* we know we can't overflow here. */
2197 memcpy (buf
+ i
, &offset32
, 4);
2201 append_insns (&buildaddr
, i
, buf
);
2202 current_insn_ptr
= buildaddr
;
2206 amd64_emit_reg (int reg
)
2208 unsigned char buf
[16];
2210 CORE_ADDR buildaddr
;
2212 /* Assume raw_regs is still in %rdi. */
2213 buildaddr
= current_insn_ptr
;
2215 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2216 memcpy (&buf
[i
], ®
, sizeof (reg
));
2218 append_insns (&buildaddr
, i
, buf
);
2219 current_insn_ptr
= buildaddr
;
2220 amd64_emit_call (get_raw_reg_func_addr ());
2224 amd64_emit_pop (void)
2226 EMIT_ASM (amd64_pop
,
2231 amd64_emit_stack_flush (void)
2233 EMIT_ASM (amd64_stack_flush
,
2238 amd64_emit_zero_ext (int arg
)
2243 EMIT_ASM (amd64_zero_ext_8
,
2247 EMIT_ASM (amd64_zero_ext_16
,
2248 "and $0xffff,%rax");
2251 EMIT_ASM (amd64_zero_ext_32
,
2252 "mov $0xffffffff,%rcx\n\t"
2261 amd64_emit_swap (void)
2263 EMIT_ASM (amd64_swap
,
2270 amd64_emit_stack_adjust (int n
)
2272 unsigned char buf
[16];
2274 CORE_ADDR buildaddr
= current_insn_ptr
;
2277 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2281 /* This only handles adjustments up to 16, but we don't expect any more. */
2283 append_insns (&buildaddr
, i
, buf
);
2284 current_insn_ptr
= buildaddr
;
2287 /* FN's prototype is `LONGEST(*fn)(int)'. */
2290 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2292 unsigned char buf
[16];
2294 CORE_ADDR buildaddr
;
2296 buildaddr
= current_insn_ptr
;
2298 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2299 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2301 append_insns (&buildaddr
, i
, buf
);
2302 current_insn_ptr
= buildaddr
;
2303 amd64_emit_call (fn
);
2306 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2309 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2311 unsigned char buf
[16];
2313 CORE_ADDR buildaddr
;
2315 buildaddr
= current_insn_ptr
;
2317 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2318 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2320 append_insns (&buildaddr
, i
, buf
);
2321 current_insn_ptr
= buildaddr
;
2322 EMIT_ASM (amd64_void_call_2_a
,
2323 /* Save away a copy of the stack top. */
2325 /* Also pass top as the second argument. */
2327 amd64_emit_call (fn
);
2328 EMIT_ASM (amd64_void_call_2_b
,
2329 /* Restore the stack top, %rax may have been trashed. */
2334 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2337 "cmp %rax,(%rsp)\n\t"
2338 "jne .Lamd64_eq_fallthru\n\t"
2339 "lea 0x8(%rsp),%rsp\n\t"
2341 /* jmp, but don't trust the assembler to choose the right jump */
2342 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2343 ".Lamd64_eq_fallthru:\n\t"
2344 "lea 0x8(%rsp),%rsp\n\t"
2354 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2357 "cmp %rax,(%rsp)\n\t"
2358 "je .Lamd64_ne_fallthru\n\t"
2359 "lea 0x8(%rsp),%rsp\n\t"
2361 /* jmp, but don't trust the assembler to choose the right jump */
2362 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2363 ".Lamd64_ne_fallthru:\n\t"
2364 "lea 0x8(%rsp),%rsp\n\t"
2374 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2377 "cmp %rax,(%rsp)\n\t"
2378 "jnl .Lamd64_lt_fallthru\n\t"
2379 "lea 0x8(%rsp),%rsp\n\t"
2381 /* jmp, but don't trust the assembler to choose the right jump */
2382 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2383 ".Lamd64_lt_fallthru:\n\t"
2384 "lea 0x8(%rsp),%rsp\n\t"
2394 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2397 "cmp %rax,(%rsp)\n\t"
2398 "jnle .Lamd64_le_fallthru\n\t"
2399 "lea 0x8(%rsp),%rsp\n\t"
2401 /* jmp, but don't trust the assembler to choose the right jump */
2402 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2403 ".Lamd64_le_fallthru:\n\t"
2404 "lea 0x8(%rsp),%rsp\n\t"
2414 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2417 "cmp %rax,(%rsp)\n\t"
2418 "jng .Lamd64_gt_fallthru\n\t"
2419 "lea 0x8(%rsp),%rsp\n\t"
2421 /* jmp, but don't trust the assembler to choose the right jump */
2422 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2423 ".Lamd64_gt_fallthru:\n\t"
2424 "lea 0x8(%rsp),%rsp\n\t"
2434 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2437 "cmp %rax,(%rsp)\n\t"
2438 "jnge .Lamd64_ge_fallthru\n\t"
2439 ".Lamd64_ge_jump:\n\t"
2440 "lea 0x8(%rsp),%rsp\n\t"
2442 /* jmp, but don't trust the assembler to choose the right jump */
2443 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2444 ".Lamd64_ge_fallthru:\n\t"
2445 "lea 0x8(%rsp),%rsp\n\t"
2454 struct emit_ops amd64_emit_ops
=
2456 amd64_emit_prologue
,
2457 amd64_emit_epilogue
,
2462 amd64_emit_rsh_signed
,
2463 amd64_emit_rsh_unsigned
,
2471 amd64_emit_less_signed
,
2472 amd64_emit_less_unsigned
,
2476 amd64_write_goto_address
,
2481 amd64_emit_stack_flush
,
2482 amd64_emit_zero_ext
,
2484 amd64_emit_stack_adjust
,
2485 amd64_emit_int_call_1
,
2486 amd64_emit_void_call_2
,
2495 #endif /* __x86_64__ */
2498 i386_emit_prologue (void)
2500 EMIT_ASM32 (i386_prologue
,
2504 /* At this point, the raw regs base address is at 8(%ebp), and the
2505 value pointer is at 12(%ebp). */
2509 i386_emit_epilogue (void)
2511 EMIT_ASM32 (i386_epilogue
,
2512 "mov 12(%ebp),%ecx\n\t"
2513 "mov %eax,(%ecx)\n\t"
2514 "mov %ebx,0x4(%ecx)\n\t"
/* Emit code to add the next-to-top 64-bit stack entry to the top
   value held in %ebx:%eax (high:low), popping it.  The carry from the
   low-word add propagates via adc.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2531 i386_emit_sub (void)
2533 EMIT_ASM32 (i386_sub
,
2534 "subl %eax,(%esp)\n\t"
2535 "sbbl %ebx,4(%esp)\n\t"
2541 i386_emit_mul (void)
2547 i386_emit_lsh (void)
2553 i386_emit_rsh_signed (void)
2559 i386_emit_rsh_unsigned (void)
2565 i386_emit_ext (int arg
)
2570 EMIT_ASM32 (i386_ext_8
,
2573 "movl %eax,%ebx\n\t"
2577 EMIT_ASM32 (i386_ext_16
,
2579 "movl %eax,%ebx\n\t"
2583 EMIT_ASM32 (i386_ext_32
,
2584 "movl %eax,%ebx\n\t"
2593 i386_emit_log_not (void)
2595 EMIT_ASM32 (i386_log_not
,
2597 "test %eax,%eax\n\t"
2604 i386_emit_bit_and (void)
2606 EMIT_ASM32 (i386_and
,
2607 "and (%esp),%eax\n\t"
2608 "and 0x4(%esp),%ebx\n\t"
2609 "lea 0x8(%esp),%esp");
2613 i386_emit_bit_or (void)
2615 EMIT_ASM32 (i386_or
,
2616 "or (%esp),%eax\n\t"
2617 "or 0x4(%esp),%ebx\n\t"
2618 "lea 0x8(%esp),%esp");
2622 i386_emit_bit_xor (void)
2624 EMIT_ASM32 (i386_xor
,
2625 "xor (%esp),%eax\n\t"
2626 "xor 0x4(%esp),%ebx\n\t"
2627 "lea 0x8(%esp),%esp");
2631 i386_emit_bit_not (void)
2633 EMIT_ASM32 (i386_bit_not
,
2634 "xor $0xffffffff,%eax\n\t"
2635 "xor $0xffffffff,%ebx\n\t");
2639 i386_emit_equal (void)
2641 EMIT_ASM32 (i386_equal
,
2642 "cmpl %ebx,4(%esp)\n\t"
2643 "jne .Li386_equal_false\n\t"
2644 "cmpl %eax,(%esp)\n\t"
2645 "je .Li386_equal_true\n\t"
2646 ".Li386_equal_false:\n\t"
2648 "jmp .Li386_equal_end\n\t"
2649 ".Li386_equal_true:\n\t"
2651 ".Li386_equal_end:\n\t"
2653 "lea 0x8(%esp),%esp");
2657 i386_emit_less_signed (void)
2659 EMIT_ASM32 (i386_less_signed
,
2660 "cmpl %ebx,4(%esp)\n\t"
2661 "jl .Li386_less_signed_true\n\t"
2662 "jne .Li386_less_signed_false\n\t"
2663 "cmpl %eax,(%esp)\n\t"
2664 "jl .Li386_less_signed_true\n\t"
2665 ".Li386_less_signed_false:\n\t"
2667 "jmp .Li386_less_signed_end\n\t"
2668 ".Li386_less_signed_true:\n\t"
2670 ".Li386_less_signed_end:\n\t"
2672 "lea 0x8(%esp),%esp");
2676 i386_emit_less_unsigned (void)
2678 EMIT_ASM32 (i386_less_unsigned
,
2679 "cmpl %ebx,4(%esp)\n\t"
2680 "jb .Li386_less_unsigned_true\n\t"
2681 "jne .Li386_less_unsigned_false\n\t"
2682 "cmpl %eax,(%esp)\n\t"
2683 "jb .Li386_less_unsigned_true\n\t"
2684 ".Li386_less_unsigned_false:\n\t"
2686 "jmp .Li386_less_unsigned_end\n\t"
2687 ".Li386_less_unsigned_true:\n\t"
2689 ".Li386_less_unsigned_end:\n\t"
2691 "lea 0x8(%esp),%esp");
2695 i386_emit_ref (int size
)
2700 EMIT_ASM32 (i386_ref1
,
2704 EMIT_ASM32 (i386_ref2
,
2708 EMIT_ASM32 (i386_ref4
,
2709 "movl (%eax),%eax");
2712 EMIT_ASM32 (i386_ref8
,
2713 "movl 4(%eax),%ebx\n\t"
2714 "movl (%eax),%eax");
2720 i386_emit_if_goto (int *offset_p
, int *size_p
)
2722 EMIT_ASM32 (i386_if_goto
,
2728 /* Don't trust the assembler to choose the right jump */
2729 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2732 *offset_p
= 11; /* be sure that this matches the sequence above */
2738 i386_emit_goto (int *offset_p
, int *size_p
)
2740 EMIT_ASM32 (i386_goto
,
2741 /* Don't trust the assembler to choose the right jump */
2742 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch a previously-emitted jump at FROM (of instruction size SIZE)
   so that it targets TO, by writing the 4-byte relative displacement
   into the inferior.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit code to load the 64-bit constant NUM into the top-of-stack
   register pair %ebx:%eax (high:low).  When the high half is zero a
   two-byte xor is emitted instead of a five-byte mov.  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit a 32-bit relative call to FN.  On i386 every address is
   reachable with a rel32 call, so no far-call fallback is needed
   (contrast with amd64_emit_call).  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  /* Displacement is relative to the end of the 5-byte call insn.  */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
2810 i386_emit_reg (int reg
)
2812 unsigned char buf
[16];
2814 CORE_ADDR buildaddr
;
2816 EMIT_ASM32 (i386_reg_a
,
2818 buildaddr
= current_insn_ptr
;
2820 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2821 memcpy (&buf
[i
], ®
, sizeof (reg
));
2823 append_insns (&buildaddr
, i
, buf
);
2824 current_insn_ptr
= buildaddr
;
2825 EMIT_ASM32 (i386_reg_b
,
2826 "mov %eax,4(%esp)\n\t"
2827 "mov 8(%ebp),%eax\n\t"
2829 i386_emit_call (get_raw_reg_func_addr ());
2830 EMIT_ASM32 (i386_reg_c
,
2832 "lea 0x8(%esp),%esp");
2836 i386_emit_pop (void)
2838 EMIT_ASM32 (i386_pop
,
2844 i386_emit_stack_flush (void)
2846 EMIT_ASM32 (i386_stack_flush
,
2852 i386_emit_zero_ext (int arg
)
2857 EMIT_ASM32 (i386_zero_ext_8
,
2858 "and $0xff,%eax\n\t"
2862 EMIT_ASM32 (i386_zero_ext_16
,
2863 "and $0xffff,%eax\n\t"
2867 EMIT_ASM32 (i386_zero_ext_32
,
2876 i386_emit_swap (void)
2878 EMIT_ASM32 (i386_swap
,
2888 i386_emit_stack_adjust (int n
)
2890 unsigned char buf
[16];
2892 CORE_ADDR buildaddr
= current_insn_ptr
;
2895 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2899 append_insns (&buildaddr
, i
, buf
);
2900 current_insn_ptr
= buildaddr
;
2903 /* FN's prototype is `LONGEST(*fn)(int)'. */
2906 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2908 unsigned char buf
[16];
2910 CORE_ADDR buildaddr
;
2912 EMIT_ASM32 (i386_int_call_1_a
,
2913 /* Reserve a bit of stack space. */
2915 /* Put the one argument on the stack. */
2916 buildaddr
= current_insn_ptr
;
2918 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2921 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2923 append_insns (&buildaddr
, i
, buf
);
2924 current_insn_ptr
= buildaddr
;
2925 i386_emit_call (fn
);
2926 EMIT_ASM32 (i386_int_call_1_c
,
2928 "lea 0x8(%esp),%esp");
2931 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2934 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2936 unsigned char buf
[16];
2938 CORE_ADDR buildaddr
;
2940 EMIT_ASM32 (i386_void_call_2_a
,
2941 /* Preserve %eax only; we don't have to worry about %ebx. */
2943 /* Reserve a bit of stack space for arguments. */
2944 "sub $0x10,%esp\n\t"
2945 /* Copy "top" to the second argument position. (Note that
2946 we can't assume function won't scribble on its
2947 arguments, so don't try to restore from this.) */
2948 "mov %eax,4(%esp)\n\t"
2949 "mov %ebx,8(%esp)");
2950 /* Put the first argument on the stack. */
2951 buildaddr
= current_insn_ptr
;
2953 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2956 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2958 append_insns (&buildaddr
, i
, buf
);
2959 current_insn_ptr
= buildaddr
;
2960 i386_emit_call (fn
);
2961 EMIT_ASM32 (i386_void_call_2_b
,
2962 "lea 0x10(%esp),%esp\n\t"
2963 /* Restore original stack top. */
2969 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2972 /* Check low half first, more likely to be decider */
2973 "cmpl %eax,(%esp)\n\t"
2974 "jne .Leq_fallthru\n\t"
2975 "cmpl %ebx,4(%esp)\n\t"
2976 "jne .Leq_fallthru\n\t"
2977 "lea 0x8(%esp),%esp\n\t"
2980 /* jmp, but don't trust the assembler to choose the right jump */
2981 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2982 ".Leq_fallthru:\n\t"
2983 "lea 0x8(%esp),%esp\n\t"
2994 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2997 /* Check low half first, more likely to be decider */
2998 "cmpl %eax,(%esp)\n\t"
3000 "cmpl %ebx,4(%esp)\n\t"
3001 "je .Lne_fallthru\n\t"
3003 "lea 0x8(%esp),%esp\n\t"
3006 /* jmp, but don't trust the assembler to choose the right jump */
3007 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3008 ".Lne_fallthru:\n\t"
3009 "lea 0x8(%esp),%esp\n\t"
3020 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3023 "cmpl %ebx,4(%esp)\n\t"
3025 "jne .Llt_fallthru\n\t"
3026 "cmpl %eax,(%esp)\n\t"
3027 "jnl .Llt_fallthru\n\t"
3029 "lea 0x8(%esp),%esp\n\t"
3032 /* jmp, but don't trust the assembler to choose the right jump */
3033 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3034 ".Llt_fallthru:\n\t"
3035 "lea 0x8(%esp),%esp\n\t"
3046 i386_emit_le_goto (int *offset_p
, int *size_p
)
3049 "cmpl %ebx,4(%esp)\n\t"
3051 "jne .Lle_fallthru\n\t"
3052 "cmpl %eax,(%esp)\n\t"
3053 "jnle .Lle_fallthru\n\t"
3055 "lea 0x8(%esp),%esp\n\t"
3058 /* jmp, but don't trust the assembler to choose the right jump */
3059 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3060 ".Lle_fallthru:\n\t"
3061 "lea 0x8(%esp),%esp\n\t"
3072 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3075 "cmpl %ebx,4(%esp)\n\t"
3077 "jne .Lgt_fallthru\n\t"
3078 "cmpl %eax,(%esp)\n\t"
3079 "jng .Lgt_fallthru\n\t"
3081 "lea 0x8(%esp),%esp\n\t"
3084 /* jmp, but don't trust the assembler to choose the right jump */
3085 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3086 ".Lgt_fallthru:\n\t"
3087 "lea 0x8(%esp),%esp\n\t"
3098 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3101 "cmpl %ebx,4(%esp)\n\t"
3103 "jne .Lge_fallthru\n\t"
3104 "cmpl %eax,(%esp)\n\t"
3105 "jnge .Lge_fallthru\n\t"
3107 "lea 0x8(%esp),%esp\n\t"
3110 /* jmp, but don't trust the assembler to choose the right jump */
3111 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3112 ".Lge_fallthru:\n\t"
3113 "lea 0x8(%esp),%esp\n\t"
3123 struct emit_ops i386_emit_ops
=
3131 i386_emit_rsh_signed
,
3132 i386_emit_rsh_unsigned
,
3140 i386_emit_less_signed
,
3141 i386_emit_less_unsigned
,
3145 i386_write_goto_address
,
3150 i386_emit_stack_flush
,
3153 i386_emit_stack_adjust
,
3154 i386_emit_int_call_1
,
3155 i386_emit_void_call_2
,
3165 static struct emit_ops
*
3169 int use_64bit
= register_size (0) == 8;
3172 return &amd64_emit_ops
;
3175 return &i386_emit_ops
;
3179 x86_supports_range_stepping (void)
3184 /* This is initialized assuming an amd64 target.
3185 x86_arch_setup will correct it for i386 or amd64 targets. */
3187 struct linux_target_ops the_low_target
=
3195 NULL
, /* fetch_register */
3205 x86_stopped_by_watchpoint
,
3206 x86_stopped_data_address
,
3207 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3208 native i386 case (no registers smaller than an xfer unit), and are not
3209 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3212 /* need to fix up i386 siginfo if host is amd64 */
3214 x86_linux_new_process
,
3215 x86_linux_new_thread
,
3216 x86_linux_prepare_to_resume
,
3217 x86_linux_process_qsupported
,
3218 x86_supports_tracepoints
,
3219 x86_get_thread_area
,
3220 x86_install_fast_tracepoint_jump_pad
,
3222 x86_get_min_fast_tracepoint_insn_len
,
3223 x86_supports_range_stepping
,