1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Fallback target descriptions used when the peer GDB has no XML
   support; filled in lazily.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;
/* Templates for the jump instructions written into a fast-tracepoint
   jump pad: 0xe9 is JMP rel32 (5 bytes), 0x66 0xe9 is JMP rel16
   (4 bytes).  The displacement bytes are patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
89 #include <sys/procfs.h>
90 #include <sys/ptrace.h>
/* Fallback values for ptrace requests the host headers may lack.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
120 /* Per-process arch-specific data we want to keep. */
122 struct arch_process_info
124 struct i386_debug_reg_state debug_reg_state
;
127 /* Per-thread arch-specific data we want to keep. */
131 /* Non-zero if our copy differs from what's recorded in the thread. */
132 int debug_registers_changed
;
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
/* Mapping from GDB's amd64 register numbers to `struct user' offsets.
   Entries of -1 are registers not transferred via this map (FP/SSE
   state goes through other regsets).  The final entry is orig_rax.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
170 #else /* ! __x86_64__ */
172 /* Mapping between the general-purpose registers in `struct user'
173 format and GDB's register array layout. */
174 static /*const*/ int i386_regmap
[] =
176 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
177 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
178 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
179 DS
* 4, ES
* 4, FS
* 4, GS
* 4
182 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
188 /* Returns true if the current inferior belongs to a x86-64 process,
192 is_64bit_tdesc (void)
194 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
196 return register_size (regcache
->tdesc
, 0) == 8;
202 /* Called by libthread_db. */
205 ps_get_thread_area (const struct ps_prochandle
*ph
,
206 lwpid_t lwpid
, int idx
, void **base
)
209 int use_64bit
= is_64bit_tdesc ();
216 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
220 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
231 unsigned int desc
[4];
233 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
234 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
237 /* Ensure we properly extend the value to 64-bits for x86_64. */
238 *base
= (void *) (uintptr_t) desc
[1];
243 /* Get the thread area address. This is used to recognize which
244 thread is which when tracing with the in-process agent library. We
245 don't read anything from the address, and treat it as opaque; it's
246 the address itself that we assume is unique per-thread. */
249 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
252 int use_64bit
= is_64bit_tdesc ();
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
259 *addr
= (CORE_ADDR
) (uintptr_t) base
;
268 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
269 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
270 unsigned int desc
[4];
272 const int reg_thread_area
= 3; /* bits to scale down register value. */
275 collect_register_by_name (regcache
, "gs", &gs
);
277 idx
= gs
>> reg_thread_area
;
279 if (ptrace (PTRACE_GET_THREAD_AREA
,
281 (void *) (long) idx
, (unsigned long) &desc
) < 0)
292 x86_cannot_store_register (int regno
)
295 if (is_64bit_tdesc ())
299 return regno
>= I386_NUM_REGS
;
303 x86_cannot_fetch_register (int regno
)
306 if (is_64bit_tdesc ())
310 return regno
>= I386_NUM_REGS
;
314 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
319 if (register_size (regcache
->tdesc
, 0) == 8)
321 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
322 if (x86_64_regmap
[i
] != -1)
323 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
328 for (i
= 0; i
< I386_NUM_REGS
; i
++)
329 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
331 collect_register_by_name (regcache
, "orig_eax",
332 ((char *) buf
) + ORIG_EAX
* 4);
336 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
341 if (register_size (regcache
->tdesc
, 0) == 8)
343 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
344 if (x86_64_regmap
[i
] != -1)
345 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
350 for (i
= 0; i
< I386_NUM_REGS
; i
++)
351 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
353 supply_register_by_name (regcache
, "orig_eax",
354 ((char *) buf
) + ORIG_EAX
* 4);
/* Regset fill/store helpers for the FP, FPX and XSTATE register
   sets.  They delegate to the shared i387 conversion routines.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
405 /* ??? The non-biarch i386 case stores all the i387 regs twice.
406 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
407 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
408 doesn't work. IWBN to avoid the duplication in the case where it
409 does work. Maybe the arch_setup routine could check whether it works
410 and update the supported regsets accordingly. */
412 static struct regset_info x86_regsets
[] =
414 #ifdef HAVE_PTRACE_GETREGS
415 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
417 x86_fill_gregset
, x86_store_gregset
},
418 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
419 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
421 # ifdef HAVE_PTRACE_GETFPXREGS
422 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
424 x86_fill_fpxregset
, x86_store_fpxregset
},
427 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
429 x86_fill_fpregset
, x86_store_fpregset
},
430 #endif /* HAVE_PTRACE_GETREGS */
431 { 0, 0, 0, -1, -1, NULL
, NULL
}
435 x86_get_pc (struct regcache
*regcache
)
437 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
442 collect_register_by_name (regcache
, "rip", &pc
);
443 return (CORE_ADDR
) pc
;
448 collect_register_by_name (regcache
, "eip", &pc
);
449 return (CORE_ADDR
) pc
;
454 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
456 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
460 unsigned long newpc
= pc
;
461 supply_register_by_name (regcache
, "rip", &newpc
);
465 unsigned int newpc
= pc
;
466 supply_register_by_name (regcache
, "eip", &newpc
);
470 static const unsigned char x86_breakpoint
[] = { 0xCC };
471 #define x86_breakpoint_len 1
474 x86_breakpoint_at (CORE_ADDR pc
)
478 (*the_target
->read_memory
) (pc
, &c
, 1);
485 /* Support for debug registers. */
488 x86_linux_dr_get (ptid_t ptid
, int regnum
)
493 tid
= ptid_get_lwp (ptid
);
496 value
= ptrace (PTRACE_PEEKUSER
, tid
,
497 offsetof (struct user
, u_debugreg
[regnum
]), 0);
499 error ("Couldn't read debug register");
505 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
509 tid
= ptid_get_lwp (ptid
);
512 ptrace (PTRACE_POKEUSER
, tid
,
513 offsetof (struct user
, u_debugreg
[regnum
]), value
);
515 error ("Couldn't write debug register");
519 update_debug_registers_callback (struct inferior_list_entry
*entry
,
522 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
523 int pid
= *(int *) pid_p
;
525 /* Only update the threads of this process. */
526 if (pid_of (lwp
) == pid
)
528 /* The actual update is done later just before resuming the lwp,
529 we just mark that the registers need updating. */
530 lwp
->arch_private
->debug_registers_changed
= 1;
532 /* If the lwp isn't stopped, force it to momentarily pause, so
533 we can update its debug registers. */
535 linux_stop_lwp (lwp
);
541 /* Update the inferior's debug register REGNUM from STATE. */
544 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
546 /* Only update the threads of this process. */
547 int pid
= pid_of (get_thread_lwp (current_inferior
));
549 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
550 fatal ("Invalid debug register %d", regnum
);
552 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
555 /* Return the inferior's debug register REGNUM. */
558 i386_dr_low_get_addr (int regnum
)
560 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
561 ptid_t ptid
= ptid_of (lwp
);
563 /* DR6 and DR7 are retrieved with some other way. */
564 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
566 return x86_linux_dr_get (ptid
, regnum
);
569 /* Update the inferior's DR7 debug control register from STATE. */
572 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
574 /* Only update the threads of this process. */
575 int pid
= pid_of (get_thread_lwp (current_inferior
));
577 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
580 /* Return the inferior's DR7 debug control register. */
583 i386_dr_low_get_control (void)
585 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
586 ptid_t ptid
= ptid_of (lwp
);
588 return x86_linux_dr_get (ptid
, DR_CONTROL
);
591 /* Get the value of the DR6 debug status register from the inferior
592 and record it in STATE. */
595 i386_dr_low_get_status (void)
597 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
598 ptid_t ptid
= ptid_of (lwp
);
600 return x86_linux_dr_get (ptid
, DR_STATUS
);
603 /* Breakpoint/Watchpoint support. */
606 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
608 struct process_info
*proc
= current_process ();
611 case '0': /* software-breakpoint */
615 ret
= prepare_to_access_memory ();
618 ret
= set_gdb_breakpoint_at (addr
);
619 done_accessing_memory ();
622 case '1': /* hardware-breakpoint */
623 case '2': /* write watchpoint */
624 case '3': /* read watchpoint */
625 case '4': /* access watchpoint */
626 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
636 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
638 struct process_info
*proc
= current_process ();
641 case '0': /* software-breakpoint */
645 ret
= prepare_to_access_memory ();
648 ret
= delete_gdb_breakpoint_at (addr
);
649 done_accessing_memory ();
652 case '1': /* hardware-breakpoint */
653 case '2': /* write watchpoint */
654 case '3': /* read watchpoint */
655 case '4': /* access watchpoint */
656 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
665 x86_stopped_by_watchpoint (void)
667 struct process_info
*proc
= current_process ();
668 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
672 x86_stopped_data_address (void)
674 struct process_info
*proc
= current_process ();
676 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
682 /* Called when a new process is created. */
684 static struct arch_process_info
*
685 x86_linux_new_process (void)
687 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
689 i386_low_init_dregs (&info
->debug_reg_state
);
694 /* Called when a new thread is detected. */
696 static struct arch_lwp_info
*
697 x86_linux_new_thread (void)
699 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
701 info
->debug_registers_changed
= 1;
706 /* Called when resuming a thread.
707 If the debug regs have changed, update the thread's copies. */
710 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
712 ptid_t ptid
= ptid_of (lwp
);
713 int clear_status
= 0;
715 if (lwp
->arch_private
->debug_registers_changed
)
718 int pid
= ptid_get_pid (ptid
);
719 struct process_info
*proc
= find_process_pid (pid
);
720 struct i386_debug_reg_state
*state
721 = &proc
->private->arch_private
->debug_reg_state
;
723 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
724 if (state
->dr_ref_count
[i
] > 0)
726 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
728 /* If we're setting a watchpoint, any change the inferior
729 had done itself to the debug registers needs to be
730 discarded, otherwise, i386_low_stopped_data_address can
735 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
737 lwp
->arch_private
->debug_registers_changed
= 0;
740 if (clear_status
|| lwp
->stopped_by_watchpoint
)
741 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
744 /* When GDBSERVER is built as a 64-bit application on linux, the
745 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
746 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
747 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
748 conversion in-place ourselves. */
750 /* These types below (compat_*) define a siginfo type that is layout
751 compatible with the siginfo type exported by the 32-bit userspace
756 typedef int compat_int_t
;
757 typedef unsigned int compat_uptr_t
;
759 typedef int compat_time_t
;
760 typedef int compat_timer_t
;
761 typedef int compat_clock_t
;
763 struct compat_timeval
765 compat_time_t tv_sec
;
769 typedef union compat_sigval
771 compat_int_t sival_int
;
772 compat_uptr_t sival_ptr
;
775 typedef struct compat_siginfo
783 int _pad
[((128 / sizeof (int)) - 3)];
792 /* POSIX.1b timers */
797 compat_sigval_t _sigval
;
800 /* POSIX.1b signals */
805 compat_sigval_t _sigval
;
814 compat_clock_t _utime
;
815 compat_clock_t _stime
;
818 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
833 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
834 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
836 typedef struct compat_x32_siginfo
844 int _pad
[((128 / sizeof (int)) - 3)];
853 /* POSIX.1b timers */
858 compat_sigval_t _sigval
;
861 /* POSIX.1b signals */
866 compat_sigval_t _sigval
;
875 compat_x32_clock_t _utime
;
876 compat_x32_clock_t _stime
;
879 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
892 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
/* Accessors into the compat siginfo unions, mirroring the kernel's
   sifields layout.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
916 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
918 memset (to
, 0, sizeof (*to
));
920 to
->si_signo
= from
->si_signo
;
921 to
->si_errno
= from
->si_errno
;
922 to
->si_code
= from
->si_code
;
924 if (to
->si_code
== SI_TIMER
)
926 to
->cpt_si_timerid
= from
->si_timerid
;
927 to
->cpt_si_overrun
= from
->si_overrun
;
928 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
930 else if (to
->si_code
== SI_USER
)
932 to
->cpt_si_pid
= from
->si_pid
;
933 to
->cpt_si_uid
= from
->si_uid
;
935 else if (to
->si_code
< 0)
937 to
->cpt_si_pid
= from
->si_pid
;
938 to
->cpt_si_uid
= from
->si_uid
;
939 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
943 switch (to
->si_signo
)
946 to
->cpt_si_pid
= from
->si_pid
;
947 to
->cpt_si_uid
= from
->si_uid
;
948 to
->cpt_si_status
= from
->si_status
;
949 to
->cpt_si_utime
= from
->si_utime
;
950 to
->cpt_si_stime
= from
->si_stime
;
956 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
959 to
->cpt_si_band
= from
->si_band
;
960 to
->cpt_si_fd
= from
->si_fd
;
963 to
->cpt_si_pid
= from
->si_pid
;
964 to
->cpt_si_uid
= from
->si_uid
;
965 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
972 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
974 memset (to
, 0, sizeof (*to
));
976 to
->si_signo
= from
->si_signo
;
977 to
->si_errno
= from
->si_errno
;
978 to
->si_code
= from
->si_code
;
980 if (to
->si_code
== SI_TIMER
)
982 to
->si_timerid
= from
->cpt_si_timerid
;
983 to
->si_overrun
= from
->cpt_si_overrun
;
984 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
986 else if (to
->si_code
== SI_USER
)
988 to
->si_pid
= from
->cpt_si_pid
;
989 to
->si_uid
= from
->cpt_si_uid
;
991 else if (to
->si_code
< 0)
993 to
->si_pid
= from
->cpt_si_pid
;
994 to
->si_uid
= from
->cpt_si_uid
;
995 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
999 switch (to
->si_signo
)
1002 to
->si_pid
= from
->cpt_si_pid
;
1003 to
->si_uid
= from
->cpt_si_uid
;
1004 to
->si_status
= from
->cpt_si_status
;
1005 to
->si_utime
= from
->cpt_si_utime
;
1006 to
->si_stime
= from
->cpt_si_stime
;
1012 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1015 to
->si_band
= from
->cpt_si_band
;
1016 to
->si_fd
= from
->cpt_si_fd
;
1019 to
->si_pid
= from
->cpt_si_pid
;
1020 to
->si_uid
= from
->cpt_si_uid
;
1021 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1028 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1031 memset (to
, 0, sizeof (*to
));
1033 to
->si_signo
= from
->si_signo
;
1034 to
->si_errno
= from
->si_errno
;
1035 to
->si_code
= from
->si_code
;
1037 if (to
->si_code
== SI_TIMER
)
1039 to
->cpt_si_timerid
= from
->si_timerid
;
1040 to
->cpt_si_overrun
= from
->si_overrun
;
1041 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1043 else if (to
->si_code
== SI_USER
)
1045 to
->cpt_si_pid
= from
->si_pid
;
1046 to
->cpt_si_uid
= from
->si_uid
;
1048 else if (to
->si_code
< 0)
1050 to
->cpt_si_pid
= from
->si_pid
;
1051 to
->cpt_si_uid
= from
->si_uid
;
1052 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1056 switch (to
->si_signo
)
1059 to
->cpt_si_pid
= from
->si_pid
;
1060 to
->cpt_si_uid
= from
->si_uid
;
1061 to
->cpt_si_status
= from
->si_status
;
1062 to
->cpt_si_utime
= from
->si_utime
;
1063 to
->cpt_si_stime
= from
->si_stime
;
1069 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1072 to
->cpt_si_band
= from
->si_band
;
1073 to
->cpt_si_fd
= from
->si_fd
;
1076 to
->cpt_si_pid
= from
->si_pid
;
1077 to
->cpt_si_uid
= from
->si_uid
;
1078 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1085 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1086 compat_x32_siginfo_t
*from
)
1088 memset (to
, 0, sizeof (*to
));
1090 to
->si_signo
= from
->si_signo
;
1091 to
->si_errno
= from
->si_errno
;
1092 to
->si_code
= from
->si_code
;
1094 if (to
->si_code
== SI_TIMER
)
1096 to
->si_timerid
= from
->cpt_si_timerid
;
1097 to
->si_overrun
= from
->cpt_si_overrun
;
1098 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1100 else if (to
->si_code
== SI_USER
)
1102 to
->si_pid
= from
->cpt_si_pid
;
1103 to
->si_uid
= from
->cpt_si_uid
;
1105 else if (to
->si_code
< 0)
1107 to
->si_pid
= from
->cpt_si_pid
;
1108 to
->si_uid
= from
->cpt_si_uid
;
1109 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1113 switch (to
->si_signo
)
1116 to
->si_pid
= from
->cpt_si_pid
;
1117 to
->si_uid
= from
->cpt_si_uid
;
1118 to
->si_status
= from
->cpt_si_status
;
1119 to
->si_utime
= from
->cpt_si_utime
;
1120 to
->si_stime
= from
->cpt_si_stime
;
1126 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1129 to
->si_band
= from
->cpt_si_band
;
1130 to
->si_fd
= from
->cpt_si_fd
;
1133 to
->si_pid
= from
->cpt_si_pid
;
1134 to
->si_uid
= from
->cpt_si_uid
;
1135 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1141 #endif /* __x86_64__ */
1143 /* Convert a native/host siginfo object, into/from the siginfo in the
1144 layout of the inferiors' architecture. Returns true if any
1145 conversion was done; false otherwise. If DIRECTION is 1, then copy
1146 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1150 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1153 unsigned int machine
;
1154 int tid
= lwpid_of (get_thread_lwp (current_inferior
));
1155 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1157 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1158 if (!is_64bit_tdesc ())
1160 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1161 fatal ("unexpected difference in siginfo");
1164 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1166 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1170 /* No fixup for native x32 GDB. */
1171 else if (!is_elf64
&& sizeof (void *) == 8)
1173 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1174 fatal ("unexpected difference in siginfo");
1177 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1180 siginfo_from_compat_x32_siginfo (native
,
1181 (struct compat_x32_siginfo
*) inf
);
/* Format of XSAVE extended state is:
	fxsave_bytes[0..463]
	sw_usable_bytes[464..511]
	xstate_hdr_bytes[512..575]

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
   extended state mask, which is the same as the extended control register
   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
   together with the mask saved in the xstate_hdr_bytes to determine what
   states the processor/OS supports and what state, used or initialized,
   the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1	/* Unknown; probed on first use.  */
#else
  0	/* Not compiled in, so definitely unavailable.  */
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 = unknown.  */
static int have_ptrace_getregset = -1;
1227 /* Get Linux/x86 target description from running target. */
1229 static const struct target_desc
*
1230 x86_linux_read_description (void)
1232 unsigned int machine
;
1236 static uint64_t xcr0
;
1237 struct regset_info
*regset
;
1239 tid
= lwpid_of (get_thread_lwp (current_inferior
));
1241 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1243 if (sizeof (void *) == 4)
1246 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1248 else if (machine
== EM_X86_64
)
1249 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1253 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1254 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1256 elf_fpxregset_t fpxregs
;
1258 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1260 have_ptrace_getfpxregs
= 0;
1261 have_ptrace_getregset
= 0;
1262 return tdesc_i386_mmx_linux
;
1265 have_ptrace_getfpxregs
= 1;
1271 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1273 /* Don't use XML. */
1275 if (machine
== EM_X86_64
)
1276 return tdesc_amd64_linux_no_xml
;
1279 return tdesc_i386_linux_no_xml
;
1282 if (have_ptrace_getregset
== -1)
1284 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1287 iov
.iov_base
= xstateregs
;
1288 iov
.iov_len
= sizeof (xstateregs
);
1290 /* Check if PTRACE_GETREGSET works. */
1291 if (ptrace (PTRACE_GETREGSET
, tid
,
1292 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1293 have_ptrace_getregset
= 0;
1296 have_ptrace_getregset
= 1;
1298 /* Get XCR0 from XSAVE extended state. */
1299 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1300 / sizeof (uint64_t))];
1302 /* Use PTRACE_GETREGSET if it is available. */
1303 for (regset
= x86_regsets
;
1304 regset
->fill_function
!= NULL
; regset
++)
1305 if (regset
->get_request
== PTRACE_GETREGSET
)
1306 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1307 else if (regset
->type
!= GENERAL_REGS
)
1312 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1313 avx
= (have_ptrace_getregset
1314 && (xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
);
1316 /* AVX is the highest feature we support. */
1320 if (machine
== EM_X86_64
)
1326 return tdesc_x32_avx_linux
;
1328 return tdesc_amd64_avx_linux
;
1333 return tdesc_x32_linux
;
1335 return tdesc_amd64_linux
;
1342 return tdesc_i386_avx_linux
;
1344 return tdesc_i386_linux
;
1347 gdb_assert_not_reached ("failed to return tdesc");
1350 /* Callback for find_inferior. Stops iteration when a thread with a
1351 given PID is found. */
1354 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1356 int pid
= *(int *) data
;
1358 return (ptid_get_pid (entry
->id
) == pid
);
1361 /* Callback for for_each_inferior. Calls the arch_setup routine for
1365 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1367 int pid
= ptid_get_pid (entry
->id
);
1369 /* Look up any thread of this processes. */
1371 = (struct thread_info
*) find_inferior (&all_threads
,
1372 same_process_callback
, &pid
);
1374 the_low_target
.arch_setup ();
1377 /* Update all the target description of all processes; a new GDB
1378 connected, and it may or not support xml target descriptions. */
1381 x86_linux_update_xmltarget (void)
1383 struct thread_info
*save_inferior
= current_inferior
;
1385 /* Before changing the register cache's internal layout, flush the
1386 contents of the current valid caches back to the threads, and
1387 release the current regcache objects. */
1388 regcache_release ();
1390 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1392 current_inferior
= save_inferior
;
1395 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1396 PTRACE_GETREGSET. */
1399 x86_linux_process_qsupported (const char *query
)
1401 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1402 with "i386" in qSupported query, it supports x86 XML target
1405 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1407 char *copy
= xstrdup (query
+ 13);
1410 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1412 if (strcmp (p
, "i386") == 0)
1422 x86_linux_update_xmltarget ();
1425 /* Common for x86/x86-64. */
1427 static struct regsets_info x86_regsets_info
=
1429 x86_regsets
, /* regsets */
1430 0, /* num_regsets */
1431 NULL
, /* disabled_regsets */
1435 static struct regs_info amd64_linux_regs_info
=
1437 NULL
, /* regset_bitmap */
1438 NULL
, /* usrregs_info */
1442 static struct usrregs_info i386_linux_usrregs_info
=
1448 static struct regs_info i386_linux_regs_info
=
1450 NULL
, /* regset_bitmap */
1451 &i386_linux_usrregs_info
,
1455 const struct regs_info
*
1456 x86_linux_regs_info (void)
1459 if (is_64bit_tdesc ())
1460 return &amd64_linux_regs_info
;
1463 return &i386_linux_regs_info
;
1466 /* Initialize the target description for the architecture of the
1470 x86_arch_setup (void)
1472 current_process ()->tdesc
= x86_linux_read_description ();
1476 x86_supports_tracepoints (void)
1482 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1484 write_inferior_memory (*to
, buf
, len
);
/* Decode the hex-byte string OP (e.g. "48 89 e6") into BUF.  Returns
   the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1510 /* Build a jump pad that saves registers and calls a collection
1511 function. Writes a jump instruction to the jump pad to
1512 JJUMPAD_INSN. The caller is responsible to write it in at the
1513 tracepoint address. */
1516 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1517 CORE_ADDR collector
,
1520 CORE_ADDR
*jump_entry
,
1521 CORE_ADDR
*trampoline
,
1522 ULONGEST
*trampoline_size
,
1523 unsigned char *jjump_pad_insn
,
1524 ULONGEST
*jjump_pad_insn_size
,
1525 CORE_ADDR
*adjusted_insn_addr
,
1526 CORE_ADDR
*adjusted_insn_addr_end
,
1529 unsigned char buf
[40];
1533 CORE_ADDR buildaddr
= *jump_entry
;
1535 /* Build the jump pad. */
1537 /* First, do tracepoint data collection. Save registers. */
1539 /* Need to ensure stack pointer saved first. */
1540 buf
[i
++] = 0x54; /* push %rsp */
1541 buf
[i
++] = 0x55; /* push %rbp */
1542 buf
[i
++] = 0x57; /* push %rdi */
1543 buf
[i
++] = 0x56; /* push %rsi */
1544 buf
[i
++] = 0x52; /* push %rdx */
1545 buf
[i
++] = 0x51; /* push %rcx */
1546 buf
[i
++] = 0x53; /* push %rbx */
1547 buf
[i
++] = 0x50; /* push %rax */
1548 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1549 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1550 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1551 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1552 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1553 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1554 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1555 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1556 buf
[i
++] = 0x9c; /* pushfq */
1557 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1559 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1560 i
+= sizeof (unsigned long);
1561 buf
[i
++] = 0x57; /* push %rdi */
1562 append_insns (&buildaddr
, i
, buf
);
1564 /* Stack space for the collecting_t object. */
1566 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1567 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1568 memcpy (buf
+ i
, &tpoint
, 8);
1570 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1571 i
+= push_opcode (&buf
[i
],
1572 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1573 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1574 append_insns (&buildaddr
, i
, buf
);
1578 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1579 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1581 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1582 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1583 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1584 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1585 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1586 append_insns (&buildaddr
, i
, buf
);
1588 /* Set up the gdb_collect call. */
1589 /* At this point, (stack pointer + 0x18) is the base of our saved
1593 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1594 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1596 /* tpoint address may be 64-bit wide. */
1597 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1598 memcpy (buf
+ i
, &tpoint
, 8);
1600 append_insns (&buildaddr
, i
, buf
);
1602 /* The collector function being in the shared library, may be
1603 >31-bits away off the jump pad. */
1605 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1606 memcpy (buf
+ i
, &collector
, 8);
1608 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1609 append_insns (&buildaddr
, i
, buf
);
1611 /* Clear the spin-lock. */
1613 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1614 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1615 memcpy (buf
+ i
, &lockaddr
, 8);
1617 append_insns (&buildaddr
, i
, buf
);
1619 /* Remove stack that had been used for the collect_t object. */
1621 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1622 append_insns (&buildaddr
, i
, buf
);
1624 /* Restore register state. */
1626 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1630 buf
[i
++] = 0x9d; /* popfq */
1631 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1632 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1633 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1634 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1635 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1636 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1637 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1638 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1639 buf
[i
++] = 0x58; /* pop %rax */
1640 buf
[i
++] = 0x5b; /* pop %rbx */
1641 buf
[i
++] = 0x59; /* pop %rcx */
1642 buf
[i
++] = 0x5a; /* pop %rdx */
1643 buf
[i
++] = 0x5e; /* pop %rsi */
1644 buf
[i
++] = 0x5f; /* pop %rdi */
1645 buf
[i
++] = 0x5d; /* pop %rbp */
1646 buf
[i
++] = 0x5c; /* pop %rsp */
1647 append_insns (&buildaddr
, i
, buf
);
1649 /* Now, adjust the original instruction to execute in the jump
1651 *adjusted_insn_addr
= buildaddr
;
1652 relocate_instruction (&buildaddr
, tpaddr
);
1653 *adjusted_insn_addr_end
= buildaddr
;
1655 /* Finally, write a jump back to the program. */
1657 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1658 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1661 "E.Jump back from jump pad too far from tracepoint "
1662 "(offset 0x%" PRIx64
" > int32).", loffset
);
1666 offset
= (int) loffset
;
1667 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1668 memcpy (buf
+ 1, &offset
, 4);
1669 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1671 /* The jump pad is now built. Wire in a jump to our jump pad. This
1672 is always done last (by our caller actually), so that we can
1673 install fast tracepoints with threads running. This relies on
1674 the agent's atomic write support. */
1675 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1676 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1679 "E.Jump pad too far from tracepoint "
1680 "(offset 0x%" PRIx64
" > int32).", loffset
);
1684 offset
= (int) loffset
;
1686 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1687 memcpy (buf
+ 1, &offset
, 4);
1688 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1689 *jjump_pad_insn_size
= sizeof (jump_insn
);
1691 /* Return the end address of our pad. */
1692 *jump_entry
= buildaddr
;
1697 #endif /* __x86_64__ */
1699 /* Build a jump pad that saves registers and calls a collection
1700 function. Writes a jump instruction to the jump pad to
1701 JJUMPAD_INSN. The caller is responsible to write it in at the
1702 tracepoint address. */
1705 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1706 CORE_ADDR collector
,
1709 CORE_ADDR
*jump_entry
,
1710 CORE_ADDR
*trampoline
,
1711 ULONGEST
*trampoline_size
,
1712 unsigned char *jjump_pad_insn
,
1713 ULONGEST
*jjump_pad_insn_size
,
1714 CORE_ADDR
*adjusted_insn_addr
,
1715 CORE_ADDR
*adjusted_insn_addr_end
,
1718 unsigned char buf
[0x100];
1720 CORE_ADDR buildaddr
= *jump_entry
;
1722 /* Build the jump pad. */
1724 /* First, do tracepoint data collection. Save registers. */
1726 buf
[i
++] = 0x60; /* pushad */
1727 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1728 *((int *)(buf
+ i
)) = (int) tpaddr
;
1730 buf
[i
++] = 0x9c; /* pushf */
1731 buf
[i
++] = 0x1e; /* push %ds */
1732 buf
[i
++] = 0x06; /* push %es */
1733 buf
[i
++] = 0x0f; /* push %fs */
1735 buf
[i
++] = 0x0f; /* push %gs */
1737 buf
[i
++] = 0x16; /* push %ss */
1738 buf
[i
++] = 0x0e; /* push %cs */
1739 append_insns (&buildaddr
, i
, buf
);
1741 /* Stack space for the collecting_t object. */
1743 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1745 /* Build the object. */
1746 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1747 memcpy (buf
+ i
, &tpoint
, 4);
1749 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1751 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1752 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1753 append_insns (&buildaddr
, i
, buf
);
1755 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1756 If we cared for it, this could be using xchg alternatively. */
1759 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1760 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1762 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1764 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1765 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1766 append_insns (&buildaddr
, i
, buf
);
1769 /* Set up arguments to the gdb_collect call. */
1771 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1772 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1773 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1774 append_insns (&buildaddr
, i
, buf
);
1777 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1778 append_insns (&buildaddr
, i
, buf
);
1781 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1782 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1784 append_insns (&buildaddr
, i
, buf
);
1786 buf
[0] = 0xe8; /* call <reladdr> */
1787 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1788 memcpy (buf
+ 1, &offset
, 4);
1789 append_insns (&buildaddr
, 5, buf
);
1790 /* Clean up after the call. */
1791 buf
[0] = 0x83; /* add $0x8,%esp */
1794 append_insns (&buildaddr
, 3, buf
);
1797 /* Clear the spin-lock. This would need the LOCK prefix on older
1800 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1801 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1802 memcpy (buf
+ i
, &lockaddr
, 4);
1804 append_insns (&buildaddr
, i
, buf
);
1807 /* Remove stack that had been used for the collect_t object. */
1809 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1810 append_insns (&buildaddr
, i
, buf
);
1813 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1816 buf
[i
++] = 0x17; /* pop %ss */
1817 buf
[i
++] = 0x0f; /* pop %gs */
1819 buf
[i
++] = 0x0f; /* pop %fs */
1821 buf
[i
++] = 0x07; /* pop %es */
1822 buf
[i
++] = 0x1f; /* pop %ds */
1823 buf
[i
++] = 0x9d; /* popf */
1824 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1827 buf
[i
++] = 0x61; /* popad */
1828 append_insns (&buildaddr
, i
, buf
);
1830 /* Now, adjust the original instruction to execute in the jump
1832 *adjusted_insn_addr
= buildaddr
;
1833 relocate_instruction (&buildaddr
, tpaddr
);
1834 *adjusted_insn_addr_end
= buildaddr
;
1836 /* Write the jump back to the program. */
1837 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1838 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1839 memcpy (buf
+ 1, &offset
, 4);
1840 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1842 /* The jump pad is now built. Wire in a jump to our jump pad. This
1843 is always done last (by our caller actually), so that we can
1844 install fast tracepoints with threads running. This relies on
1845 the agent's atomic write support. */
1848 /* Create a trampoline. */
1849 *trampoline_size
= sizeof (jump_insn
);
1850 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1852 /* No trampoline space available. */
1854 "E.Cannot allocate trampoline space needed for fast "
1855 "tracepoints on 4-byte instructions.");
1859 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1860 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1861 memcpy (buf
+ 1, &offset
, 4);
1862 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1864 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1865 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1866 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1867 memcpy (buf
+ 2, &offset
, 2);
1868 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1869 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1873 /* Else use a 32-bit relative jump instruction. */
1874 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1875 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1876 memcpy (buf
+ 1, &offset
, 4);
1877 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1878 *jjump_pad_insn_size
= sizeof (jump_insn
);
1881 /* Return the end address of our pad. */
1882 *jump_entry
= buildaddr
;
1888 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1889 CORE_ADDR collector
,
1892 CORE_ADDR
*jump_entry
,
1893 CORE_ADDR
*trampoline
,
1894 ULONGEST
*trampoline_size
,
1895 unsigned char *jjump_pad_insn
,
1896 ULONGEST
*jjump_pad_insn_size
,
1897 CORE_ADDR
*adjusted_insn_addr
,
1898 CORE_ADDR
*adjusted_insn_addr_end
,
1902 if (is_64bit_tdesc ())
1903 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1904 collector
, lockaddr
,
1905 orig_size
, jump_entry
,
1906 trampoline
, trampoline_size
,
1908 jjump_pad_insn_size
,
1910 adjusted_insn_addr_end
,
1914 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1915 collector
, lockaddr
,
1916 orig_size
, jump_entry
,
1917 trampoline
, trampoline_size
,
1919 jjump_pad_insn_size
,
1921 adjusted_insn_addr_end
,
1925 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1929 x86_get_min_fast_tracepoint_insn_len (void)
1931 static int warned_about_fast_tracepoints
= 0;
1934 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1935 used for fast tracepoints. */
1936 if (is_64bit_tdesc ())
1940 if (agent_loaded_p ())
1942 char errbuf
[IPA_BUFSIZ
];
1946 /* On x86, if trampolines are available, then 4-byte jump instructions
1947 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1948 with a 4-byte offset are used instead. */
1949 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1953 /* GDB has no channel to explain to user why a shorter fast
1954 tracepoint is not possible, but at least make GDBserver
1955 mention that something has gone awry. */
1956 if (!warned_about_fast_tracepoints
)
1958 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1959 warned_about_fast_tracepoints
= 1;
1966 /* Indicate that the minimum length is currently unknown since the IPA
1967 has not loaded yet. */
1973 add_insns (unsigned char *start
, int len
)
1975 CORE_ADDR buildaddr
= current_insn_ptr
;
1978 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1979 len
, paddress (buildaddr
));
1981 append_insns (&buildaddr
, len
, start
);
1982 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant: wrap the insns in .code32/.code64 so the assembler
   emits i386 encodings even in a 64-bit build.  */

#define EMIT_ASM32(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       "\t" ".code64\n");					\
    } while (0)

#else

#define EMIT_ASM32(NAME, INSNS) EMIT_ASM (NAME, INSNS)

#endif
2026 amd64_emit_prologue (void)
2028 EMIT_ASM (amd64_prologue
,
2030 "movq %rsp,%rbp\n\t"
2031 "sub $0x20,%rsp\n\t"
2032 "movq %rdi,-8(%rbp)\n\t"
2033 "movq %rsi,-16(%rbp)");
2038 amd64_emit_epilogue (void)
2040 EMIT_ASM (amd64_epilogue
,
2041 "movq -16(%rbp),%rdi\n\t"
2042 "movq %rax,(%rdi)\n\t"
2049 amd64_emit_add (void)
2051 EMIT_ASM (amd64_add
,
2052 "add (%rsp),%rax\n\t"
2053 "lea 0x8(%rsp),%rsp");
2057 amd64_emit_sub (void)
2059 EMIT_ASM (amd64_sub
,
2060 "sub %rax,(%rsp)\n\t"
2065 amd64_emit_mul (void)
2071 amd64_emit_lsh (void)
2077 amd64_emit_rsh_signed (void)
2083 amd64_emit_rsh_unsigned (void)
2089 amd64_emit_ext (int arg
)
2094 EMIT_ASM (amd64_ext_8
,
2100 EMIT_ASM (amd64_ext_16
,
2105 EMIT_ASM (amd64_ext_32
,
2114 amd64_emit_log_not (void)
2116 EMIT_ASM (amd64_log_not
,
2117 "test %rax,%rax\n\t"
2123 amd64_emit_bit_and (void)
2125 EMIT_ASM (amd64_and
,
2126 "and (%rsp),%rax\n\t"
2127 "lea 0x8(%rsp),%rsp");
2131 amd64_emit_bit_or (void)
2134 "or (%rsp),%rax\n\t"
2135 "lea 0x8(%rsp),%rsp");
2139 amd64_emit_bit_xor (void)
2141 EMIT_ASM (amd64_xor
,
2142 "xor (%rsp),%rax\n\t"
2143 "lea 0x8(%rsp),%rsp");
2147 amd64_emit_bit_not (void)
2149 EMIT_ASM (amd64_bit_not
,
2150 "xorq $0xffffffffffffffff,%rax");
2154 amd64_emit_equal (void)
2156 EMIT_ASM (amd64_equal
,
2157 "cmp %rax,(%rsp)\n\t"
2158 "je .Lamd64_equal_true\n\t"
2160 "jmp .Lamd64_equal_end\n\t"
2161 ".Lamd64_equal_true:\n\t"
2163 ".Lamd64_equal_end:\n\t"
2164 "lea 0x8(%rsp),%rsp");
2168 amd64_emit_less_signed (void)
2170 EMIT_ASM (amd64_less_signed
,
2171 "cmp %rax,(%rsp)\n\t"
2172 "jl .Lamd64_less_signed_true\n\t"
2174 "jmp .Lamd64_less_signed_end\n\t"
2175 ".Lamd64_less_signed_true:\n\t"
2177 ".Lamd64_less_signed_end:\n\t"
2178 "lea 0x8(%rsp),%rsp");
2182 amd64_emit_less_unsigned (void)
2184 EMIT_ASM (amd64_less_unsigned
,
2185 "cmp %rax,(%rsp)\n\t"
2186 "jb .Lamd64_less_unsigned_true\n\t"
2188 "jmp .Lamd64_less_unsigned_end\n\t"
2189 ".Lamd64_less_unsigned_true:\n\t"
2191 ".Lamd64_less_unsigned_end:\n\t"
2192 "lea 0x8(%rsp),%rsp");
2196 amd64_emit_ref (int size
)
2201 EMIT_ASM (amd64_ref1
,
2205 EMIT_ASM (amd64_ref2
,
2209 EMIT_ASM (amd64_ref4
,
2210 "movl (%rax),%eax");
2213 EMIT_ASM (amd64_ref8
,
2214 "movq (%rax),%rax");
2220 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2222 EMIT_ASM (amd64_if_goto
,
2226 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2234 amd64_emit_goto (int *offset_p
, int *size_p
)
2236 EMIT_ASM (amd64_goto
,
2237 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2245 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2247 int diff
= (to
- (from
+ size
));
2248 unsigned char buf
[sizeof (int)];
2256 memcpy (buf
, &diff
, sizeof (int));
2257 write_inferior_memory (from
, buf
, sizeof (int));
2261 amd64_emit_const (LONGEST num
)
2263 unsigned char buf
[16];
2265 CORE_ADDR buildaddr
= current_insn_ptr
;
2268 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2269 memcpy (&buf
[i
], &num
, sizeof (num
));
2271 append_insns (&buildaddr
, i
, buf
);
2272 current_insn_ptr
= buildaddr
;
2276 amd64_emit_call (CORE_ADDR fn
)
2278 unsigned char buf
[16];
2280 CORE_ADDR buildaddr
;
2283 /* The destination function being in the shared library, may be
2284 >31-bits away off the compiled code pad. */
2286 buildaddr
= current_insn_ptr
;
2288 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2292 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2294 /* Offset is too large for a call. Use callq, but that requires
2295 a register, so avoid it if possible. Use r10, since it is
2296 call-clobbered, we don't have to push/pop it. */
2297 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2299 memcpy (buf
+ i
, &fn
, 8);
2301 buf
[i
++] = 0xff; /* callq *%r10 */
2306 int offset32
= offset64
; /* we know we can't overflow here. */
2307 memcpy (buf
+ i
, &offset32
, 4);
2311 append_insns (&buildaddr
, i
, buf
);
2312 current_insn_ptr
= buildaddr
;
2316 amd64_emit_reg (int reg
)
2318 unsigned char buf
[16];
2320 CORE_ADDR buildaddr
;
2322 /* Assume raw_regs is still in %rdi. */
2323 buildaddr
= current_insn_ptr
;
2325 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2326 memcpy (&buf
[i
], ®
, sizeof (reg
));
2328 append_insns (&buildaddr
, i
, buf
);
2329 current_insn_ptr
= buildaddr
;
2330 amd64_emit_call (get_raw_reg_func_addr ());
2334 amd64_emit_pop (void)
2336 EMIT_ASM (amd64_pop
,
2341 amd64_emit_stack_flush (void)
2343 EMIT_ASM (amd64_stack_flush
,
2348 amd64_emit_zero_ext (int arg
)
2353 EMIT_ASM (amd64_zero_ext_8
,
2357 EMIT_ASM (amd64_zero_ext_16
,
2358 "and $0xffff,%rax");
2361 EMIT_ASM (amd64_zero_ext_32
,
2362 "mov $0xffffffff,%rcx\n\t"
2371 amd64_emit_swap (void)
2373 EMIT_ASM (amd64_swap
,
2380 amd64_emit_stack_adjust (int n
)
2382 unsigned char buf
[16];
2384 CORE_ADDR buildaddr
= current_insn_ptr
;
2387 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2391 /* This only handles adjustments up to 16, but we don't expect any more. */
2393 append_insns (&buildaddr
, i
, buf
);
2394 current_insn_ptr
= buildaddr
;
2397 /* FN's prototype is `LONGEST(*fn)(int)'. */
2400 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2402 unsigned char buf
[16];
2404 CORE_ADDR buildaddr
;
2406 buildaddr
= current_insn_ptr
;
2408 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2409 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2411 append_insns (&buildaddr
, i
, buf
);
2412 current_insn_ptr
= buildaddr
;
2413 amd64_emit_call (fn
);
2416 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2419 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2421 unsigned char buf
[16];
2423 CORE_ADDR buildaddr
;
2425 buildaddr
= current_insn_ptr
;
2427 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2428 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2430 append_insns (&buildaddr
, i
, buf
);
2431 current_insn_ptr
= buildaddr
;
2432 EMIT_ASM (amd64_void_call_2_a
,
2433 /* Save away a copy of the stack top. */
2435 /* Also pass top as the second argument. */
2437 amd64_emit_call (fn
);
2438 EMIT_ASM (amd64_void_call_2_b
,
2439 /* Restore the stack top, %rax may have been trashed. */
2444 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2447 "cmp %rax,(%rsp)\n\t"
2448 "jne .Lamd64_eq_fallthru\n\t"
2449 "lea 0x8(%rsp),%rsp\n\t"
2451 /* jmp, but don't trust the assembler to choose the right jump */
2452 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2453 ".Lamd64_eq_fallthru:\n\t"
2454 "lea 0x8(%rsp),%rsp\n\t"
2464 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2467 "cmp %rax,(%rsp)\n\t"
2468 "je .Lamd64_ne_fallthru\n\t"
2469 "lea 0x8(%rsp),%rsp\n\t"
2471 /* jmp, but don't trust the assembler to choose the right jump */
2472 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2473 ".Lamd64_ne_fallthru:\n\t"
2474 "lea 0x8(%rsp),%rsp\n\t"
2484 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2487 "cmp %rax,(%rsp)\n\t"
2488 "jnl .Lamd64_lt_fallthru\n\t"
2489 "lea 0x8(%rsp),%rsp\n\t"
2491 /* jmp, but don't trust the assembler to choose the right jump */
2492 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2493 ".Lamd64_lt_fallthru:\n\t"
2494 "lea 0x8(%rsp),%rsp\n\t"
2504 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2507 "cmp %rax,(%rsp)\n\t"
2508 "jnle .Lamd64_le_fallthru\n\t"
2509 "lea 0x8(%rsp),%rsp\n\t"
2511 /* jmp, but don't trust the assembler to choose the right jump */
2512 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2513 ".Lamd64_le_fallthru:\n\t"
2514 "lea 0x8(%rsp),%rsp\n\t"
2524 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2527 "cmp %rax,(%rsp)\n\t"
2528 "jng .Lamd64_gt_fallthru\n\t"
2529 "lea 0x8(%rsp),%rsp\n\t"
2531 /* jmp, but don't trust the assembler to choose the right jump */
2532 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2533 ".Lamd64_gt_fallthru:\n\t"
2534 "lea 0x8(%rsp),%rsp\n\t"
2544 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2547 "cmp %rax,(%rsp)\n\t"
2548 "jnge .Lamd64_ge_fallthru\n\t"
2549 ".Lamd64_ge_jump:\n\t"
2550 "lea 0x8(%rsp),%rsp\n\t"
2552 /* jmp, but don't trust the assembler to choose the right jump */
2553 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2554 ".Lamd64_ge_fallthru:\n\t"
2555 "lea 0x8(%rsp),%rsp\n\t"
2564 struct emit_ops amd64_emit_ops
=
2566 amd64_emit_prologue
,
2567 amd64_emit_epilogue
,
2572 amd64_emit_rsh_signed
,
2573 amd64_emit_rsh_unsigned
,
2581 amd64_emit_less_signed
,
2582 amd64_emit_less_unsigned
,
2586 amd64_write_goto_address
,
2591 amd64_emit_stack_flush
,
2592 amd64_emit_zero_ext
,
2594 amd64_emit_stack_adjust
,
2595 amd64_emit_int_call_1
,
2596 amd64_emit_void_call_2
,
2605 #endif /* __x86_64__ */
2608 i386_emit_prologue (void)
2610 EMIT_ASM32 (i386_prologue
,
2614 /* At this point, the raw regs base address is at 8(%ebp), and the
2615 value pointer is at 12(%ebp). */
2619 i386_emit_epilogue (void)
2621 EMIT_ASM32 (i386_epilogue
,
2622 "mov 12(%ebp),%ecx\n\t"
2623 "mov %eax,(%ecx)\n\t"
2624 "mov %ebx,0x4(%ecx)\n\t"
2632 i386_emit_add (void)
2634 EMIT_ASM32 (i386_add
,
2635 "add (%esp),%eax\n\t"
2636 "adc 0x4(%esp),%ebx\n\t"
2637 "lea 0x8(%esp),%esp");
2641 i386_emit_sub (void)
2643 EMIT_ASM32 (i386_sub
,
2644 "subl %eax,(%esp)\n\t"
2645 "sbbl %ebx,4(%esp)\n\t"
2651 i386_emit_mul (void)
2657 i386_emit_lsh (void)
2663 i386_emit_rsh_signed (void)
2669 i386_emit_rsh_unsigned (void)
2675 i386_emit_ext (int arg
)
2680 EMIT_ASM32 (i386_ext_8
,
2683 "movl %eax,%ebx\n\t"
2687 EMIT_ASM32 (i386_ext_16
,
2689 "movl %eax,%ebx\n\t"
2693 EMIT_ASM32 (i386_ext_32
,
2694 "movl %eax,%ebx\n\t"
2703 i386_emit_log_not (void)
2705 EMIT_ASM32 (i386_log_not
,
2707 "test %eax,%eax\n\t"
2714 i386_emit_bit_and (void)
2716 EMIT_ASM32 (i386_and
,
2717 "and (%esp),%eax\n\t"
2718 "and 0x4(%esp),%ebx\n\t"
2719 "lea 0x8(%esp),%esp");
2723 i386_emit_bit_or (void)
2725 EMIT_ASM32 (i386_or
,
2726 "or (%esp),%eax\n\t"
2727 "or 0x4(%esp),%ebx\n\t"
2728 "lea 0x8(%esp),%esp");
2732 i386_emit_bit_xor (void)
2734 EMIT_ASM32 (i386_xor
,
2735 "xor (%esp),%eax\n\t"
2736 "xor 0x4(%esp),%ebx\n\t"
2737 "lea 0x8(%esp),%esp");
2741 i386_emit_bit_not (void)
2743 EMIT_ASM32 (i386_bit_not
,
2744 "xor $0xffffffff,%eax\n\t"
2745 "xor $0xffffffff,%ebx\n\t");
2749 i386_emit_equal (void)
2751 EMIT_ASM32 (i386_equal
,
2752 "cmpl %ebx,4(%esp)\n\t"
2753 "jne .Li386_equal_false\n\t"
2754 "cmpl %eax,(%esp)\n\t"
2755 "je .Li386_equal_true\n\t"
2756 ".Li386_equal_false:\n\t"
2758 "jmp .Li386_equal_end\n\t"
2759 ".Li386_equal_true:\n\t"
2761 ".Li386_equal_end:\n\t"
2763 "lea 0x8(%esp),%esp");
2767 i386_emit_less_signed (void)
2769 EMIT_ASM32 (i386_less_signed
,
2770 "cmpl %ebx,4(%esp)\n\t"
2771 "jl .Li386_less_signed_true\n\t"
2772 "jne .Li386_less_signed_false\n\t"
2773 "cmpl %eax,(%esp)\n\t"
2774 "jl .Li386_less_signed_true\n\t"
2775 ".Li386_less_signed_false:\n\t"
2777 "jmp .Li386_less_signed_end\n\t"
2778 ".Li386_less_signed_true:\n\t"
2780 ".Li386_less_signed_end:\n\t"
2782 "lea 0x8(%esp),%esp");
2786 i386_emit_less_unsigned (void)
2788 EMIT_ASM32 (i386_less_unsigned
,
2789 "cmpl %ebx,4(%esp)\n\t"
2790 "jb .Li386_less_unsigned_true\n\t"
2791 "jne .Li386_less_unsigned_false\n\t"
2792 "cmpl %eax,(%esp)\n\t"
2793 "jb .Li386_less_unsigned_true\n\t"
2794 ".Li386_less_unsigned_false:\n\t"
2796 "jmp .Li386_less_unsigned_end\n\t"
2797 ".Li386_less_unsigned_true:\n\t"
2799 ".Li386_less_unsigned_end:\n\t"
2801 "lea 0x8(%esp),%esp");
2805 i386_emit_ref (int size
)
2810 EMIT_ASM32 (i386_ref1
,
2814 EMIT_ASM32 (i386_ref2
,
2818 EMIT_ASM32 (i386_ref4
,
2819 "movl (%eax),%eax");
2822 EMIT_ASM32 (i386_ref8
,
2823 "movl 4(%eax),%ebx\n\t"
2824 "movl (%eax),%eax");
2830 i386_emit_if_goto (int *offset_p
, int *size_p
)
2832 EMIT_ASM32 (i386_if_goto
,
2838 /* Don't trust the assembler to choose the right jump */
2839 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2842 *offset_p
= 11; /* be sure that this matches the sequence above */
2848 i386_emit_goto (int *offset_p
, int *size_p
)
2850 EMIT_ASM32 (i386_goto
,
2851 /* Don't trust the assembler to choose the right jump */
2852 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2860 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2862 int diff
= (to
- (from
+ size
));
2863 unsigned char buf
[sizeof (int)];
2865 /* We're only doing 4-byte sizes at the moment. */
2872 memcpy (buf
, &diff
, sizeof (int));
2873 write_inferior_memory (from
, buf
, sizeof (int));
2877 i386_emit_const (LONGEST num
)
2879 unsigned char buf
[16];
2881 CORE_ADDR buildaddr
= current_insn_ptr
;
2884 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2885 lo
= num
& 0xffffffff;
2886 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2888 hi
= ((num
>> 32) & 0xffffffff);
2891 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2892 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2897 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2899 append_insns (&buildaddr
, i
, buf
);
2900 current_insn_ptr
= buildaddr
;
2904 i386_emit_call (CORE_ADDR fn
)
2906 unsigned char buf
[16];
2908 CORE_ADDR buildaddr
;
2910 buildaddr
= current_insn_ptr
;
2912 buf
[i
++] = 0xe8; /* call <reladdr> */
2913 offset
= ((int) fn
) - (buildaddr
+ 5);
2914 memcpy (buf
+ 1, &offset
, 4);
2915 append_insns (&buildaddr
, 5, buf
);
2916 current_insn_ptr
= buildaddr
;
2920 i386_emit_reg (int reg
)
2922 unsigned char buf
[16];
2924 CORE_ADDR buildaddr
;
2926 EMIT_ASM32 (i386_reg_a
,
2928 buildaddr
= current_insn_ptr
;
2930 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2931 memcpy (&buf
[i
], ®
, sizeof (reg
));
2933 append_insns (&buildaddr
, i
, buf
);
2934 current_insn_ptr
= buildaddr
;
2935 EMIT_ASM32 (i386_reg_b
,
2936 "mov %eax,4(%esp)\n\t"
2937 "mov 8(%ebp),%eax\n\t"
2939 i386_emit_call (get_raw_reg_func_addr ());
2940 EMIT_ASM32 (i386_reg_c
,
2942 "lea 0x8(%esp),%esp");
2946 i386_emit_pop (void)
2948 EMIT_ASM32 (i386_pop
,
2954 i386_emit_stack_flush (void)
2956 EMIT_ASM32 (i386_stack_flush
,
2962 i386_emit_zero_ext (int arg
)
2967 EMIT_ASM32 (i386_zero_ext_8
,
2968 "and $0xff,%eax\n\t"
2972 EMIT_ASM32 (i386_zero_ext_16
,
2973 "and $0xffff,%eax\n\t"
2977 EMIT_ASM32 (i386_zero_ext_32
,
2986 i386_emit_swap (void)
2988 EMIT_ASM32 (i386_swap
,
2998 i386_emit_stack_adjust (int n
)
3000 unsigned char buf
[16];
3002 CORE_ADDR buildaddr
= current_insn_ptr
;
3005 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3009 append_insns (&buildaddr
, i
, buf
);
3010 current_insn_ptr
= buildaddr
;
3013 /* FN's prototype is `LONGEST(*fn)(int)'. */
3016 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3018 unsigned char buf
[16];
3020 CORE_ADDR buildaddr
;
3022 EMIT_ASM32 (i386_int_call_1_a
,
3023 /* Reserve a bit of stack space. */
3025 /* Put the one argument on the stack. */
3026 buildaddr
= current_insn_ptr
;
3028 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3031 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3033 append_insns (&buildaddr
, i
, buf
);
3034 current_insn_ptr
= buildaddr
;
3035 i386_emit_call (fn
);
3036 EMIT_ASM32 (i386_int_call_1_c
,
3038 "lea 0x8(%esp),%esp");
3041 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3044 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3046 unsigned char buf
[16];
3048 CORE_ADDR buildaddr
;
3050 EMIT_ASM32 (i386_void_call_2_a
,
3051 /* Preserve %eax only; we don't have to worry about %ebx. */
3053 /* Reserve a bit of stack space for arguments. */
3054 "sub $0x10,%esp\n\t"
3055 /* Copy "top" to the second argument position. (Note that
3056 we can't assume function won't scribble on its
3057 arguments, so don't try to restore from this.) */
3058 "mov %eax,4(%esp)\n\t"
3059 "mov %ebx,8(%esp)");
3060 /* Put the first argument on the stack. */
3061 buildaddr
= current_insn_ptr
;
3063 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3066 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3068 append_insns (&buildaddr
, i
, buf
);
3069 current_insn_ptr
= buildaddr
;
3070 i386_emit_call (fn
);
3071 EMIT_ASM32 (i386_void_call_2_b
,
3072 "lea 0x10(%esp),%esp\n\t"
3073 /* Restore original stack top. */
3079 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3082 /* Check low half first, more likely to be decider */
3083 "cmpl %eax,(%esp)\n\t"
3084 "jne .Leq_fallthru\n\t"
3085 "cmpl %ebx,4(%esp)\n\t"
3086 "jne .Leq_fallthru\n\t"
3087 "lea 0x8(%esp),%esp\n\t"
3090 /* jmp, but don't trust the assembler to choose the right jump */
3091 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3092 ".Leq_fallthru:\n\t"
3093 "lea 0x8(%esp),%esp\n\t"
3104 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3107 /* Check low half first, more likely to be decider */
3108 "cmpl %eax,(%esp)\n\t"
3110 "cmpl %ebx,4(%esp)\n\t"
3111 "je .Lne_fallthru\n\t"
3113 "lea 0x8(%esp),%esp\n\t"
3116 /* jmp, but don't trust the assembler to choose the right jump */
3117 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3118 ".Lne_fallthru:\n\t"
3119 "lea 0x8(%esp),%esp\n\t"
3130 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3133 "cmpl %ebx,4(%esp)\n\t"
3135 "jne .Llt_fallthru\n\t"
3136 "cmpl %eax,(%esp)\n\t"
3137 "jnl .Llt_fallthru\n\t"
3139 "lea 0x8(%esp),%esp\n\t"
3142 /* jmp, but don't trust the assembler to choose the right jump */
3143 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3144 ".Llt_fallthru:\n\t"
3145 "lea 0x8(%esp),%esp\n\t"
3156 i386_emit_le_goto (int *offset_p
, int *size_p
)
3159 "cmpl %ebx,4(%esp)\n\t"
3161 "jne .Lle_fallthru\n\t"
3162 "cmpl %eax,(%esp)\n\t"
3163 "jnle .Lle_fallthru\n\t"
3165 "lea 0x8(%esp),%esp\n\t"
3168 /* jmp, but don't trust the assembler to choose the right jump */
3169 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3170 ".Lle_fallthru:\n\t"
3171 "lea 0x8(%esp),%esp\n\t"
3182 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3185 "cmpl %ebx,4(%esp)\n\t"
3187 "jne .Lgt_fallthru\n\t"
3188 "cmpl %eax,(%esp)\n\t"
3189 "jng .Lgt_fallthru\n\t"
3191 "lea 0x8(%esp),%esp\n\t"
3194 /* jmp, but don't trust the assembler to choose the right jump */
3195 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3196 ".Lgt_fallthru:\n\t"
3197 "lea 0x8(%esp),%esp\n\t"
/* Emit i386 code for a 64-bit signed "greater than or equal"
   compare-and-branch for the bytecode compiler; jnge guards the low half.
   NOTE(review): chunk appears truncated (header/wrapper/epilogue lines not
   visible); confirm against upstream GDB before editing.  */
3208 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3211 "cmpl %ebx,4(%esp)\n\t"
3213 "jne .Lge_fallthru\n\t"
3214 "cmpl %eax,(%esp)\n\t"
3215 "jnge .Lge_fallthru\n\t"
3217 "lea 0x8(%esp),%esp\n\t"
3220 /* jmp, but don't trust the assembler to choose the right jump */
3221 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3222 ".Lge_fallthru:\n\t"
3223 "lea 0x8(%esp),%esp\n\t"
/* Table of i386 code-emitter callbacks used by the agent-expression
   bytecode compiler: each member is a pointer to one of the
   i386_emit_* functions above (shifts, comparisons, stack management,
   helper-function calls, goto-address patching).
   NOTE(review): the initializer is visibly incomplete here — many entries
   and the closing brace are not in this view; the embedded numbers
   (3233, 3241, ...) look like remnants of original line numbering.
   Verify against the upstream file before editing.  */
3233 struct emit_ops i386_emit_ops
=
3241 i386_emit_rsh_signed
,
3242 i386_emit_rsh_unsigned
,
3250 i386_emit_less_signed
,
3251 i386_emit_less_unsigned
,
3255 i386_write_goto_address
,
3260 i386_emit_stack_flush
,
3263 i386_emit_stack_adjust
,
3264 i386_emit_int_call_1
,
3265 i386_emit_void_call_2
,
/* Return the emit_ops vector matching the inferior's architecture:
   the amd64 table when the current target description is 64-bit,
   otherwise the i386 table above.
   NOTE(review): the function name line and braces are not visible in this
   chunk (presumably x86_emit_ops); confirm against upstream GDB.  */
3275 static struct emit_ops
*
3279 if (is_64bit_tdesc ())
3280 return &amd64_emit_ops
;
3283 return &i386_emit_ops
;
/* Predicate reported through the_low_target: whether this backend supports
   range stepping (vCont;r).  NOTE(review): only the declarator line is
   visible here — the return type and body are elided in this chunk.  */
3287 x86_supports_range_stepping (void)
3292 /* This is initialized assuming an amd64 target.
3293 x86_arch_setup will correct it for i386 or amd64 targets. */
/* The per-architecture vtable consumed by the generic gdbserver Linux
   backend: register access, watchpoint status, per-process/thread hooks,
   and the fast-tracepoint support entry points.
   NOTE(review): several initializer entries visible upstream (arch_setup,
   breakpoint handling, siginfo fixup, debug-register insert/remove, the
   closing brace) are elided from this chunk — confirm before editing.  */
3295 struct linux_target_ops the_low_target
=
3298 x86_linux_regs_info
,
3299 x86_cannot_fetch_register
,
3300 x86_cannot_store_register
,
3301 NULL
, /* fetch_register */
3311 x86_stopped_by_watchpoint
,
3312 x86_stopped_data_address
,
3313 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3314 native i386 case (no registers smaller than an xfer unit), and are not
3315 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3318 /* need to fix up i386 siginfo if host is amd64 */
3320 x86_linux_new_process
,
3321 x86_linux_new_thread
,
3322 x86_linux_prepare_to_resume
,
3323 x86_linux_process_qsupported
,
3324 x86_supports_tracepoints
,
3325 x86_get_thread_area
,
3326 x86_install_fast_tracepoint_jump_pad
,
3328 x86_get_min_fast_tracepoint_insn_len
,
3329 x86_supports_range_stepping
,
/* One-time architecture initialization: build all x86 target descriptions
   (amd64, x32, i386, with and without AVX/MMX), then derive the "no XML"
   variants used when the client does not support qXfer:features, and
   finally register the regset table.
   NOTE(review): the `static void` header, braces, and the #ifdef __x86_64__
   guards around the 64-bit section that exist upstream are not visible in
   this chunk; the xmalloc results are used unchecked, which matches GDB's
   abort-on-OOM xmalloc convention.  */
3333 initialize_low_arch (void)
3335 /* Initialize the Linux target descriptions. */
3337 init_registers_amd64_linux ();
3338 init_registers_amd64_avx_linux ();
3339 init_registers_x32_linux ();
3340 init_registers_x32_avx_linux ();
/* Clone the amd64 description, then tag it with the canned XML fallback.  */
3342 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3343 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3344 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3346 init_registers_i386_linux ();
3347 init_registers_i386_mmx_linux ();
3348 init_registers_i386_avx_linux ();
/* Same fallback treatment for the 32-bit i386 description.  */
3350 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3351 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3352 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3354 initialize_regsets_info (&x86_regsets_info
);