1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;
#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

/* Fallback descriptions handed to old GDBs that lack XML support.  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

/* Canonical jump instruction encodings used when installing fast
   tracepoint jump pads.  The displacement fields are filled in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/procfs.h>
#include <sys/ptrace.h>

/* Fallback definitions for ptrace requests the installed kernel
   headers may be too old to provide.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
136 /* Per-process arch-specific data we want to keep. */
138 struct arch_process_info
140 struct i386_debug_reg_state debug_reg_state
;
143 /* Per-thread arch-specific data we want to keep. */
147 /* Non-zero if our copy differs from what's recorded in the thread. */
148 int debug_registers_changed
;
153 /* Mapping between the general-purpose registers in `struct user'
154 format and GDB's register array layout.
155 Note that the transfer layout uses 64-bit regs. */
156 static /*const*/ int i386_regmap
[] =
158 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
159 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
160 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
161 DS
* 8, ES
* 8, FS
* 8, GS
* 8
164 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
166 /* So code below doesn't have to care, i386 or amd64. */
167 #define ORIG_EAX ORIG_RAX
169 static const int x86_64_regmap
[] =
171 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
172 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
173 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
174 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
175 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
176 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1, -1, -1, -1, -1, -1, -1, -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
184 -1, -1 /* MPX registers BNDCFGU, BNDSTATUS. */
187 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
189 #else /* ! __x86_64__ */
191 /* Mapping between the general-purpose registers in `struct user'
192 format and GDB's register array layout. */
193 static /*const*/ int i386_regmap
[] =
195 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
196 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
197 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
198 DS
* 4, ES
* 4, FS
* 4, GS
* 4
201 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
207 /* Returns true if the current inferior belongs to a x86-64 process,
211 is_64bit_tdesc (void)
213 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
215 return register_size (regcache
->tdesc
, 0) == 8;
221 /* Called by libthread_db. */
224 ps_get_thread_area (const struct ps_prochandle
*ph
,
225 lwpid_t lwpid
, int idx
, void **base
)
228 int use_64bit
= is_64bit_tdesc ();
235 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
239 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
250 unsigned int desc
[4];
252 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
253 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
256 /* Ensure we properly extend the value to 64-bits for x86_64. */
257 *base
= (void *) (uintptr_t) desc
[1];
262 /* Get the thread area address. This is used to recognize which
263 thread is which when tracing with the in-process agent library. We
264 don't read anything from the address, and treat it as opaque; it's
265 the address itself that we assume is unique per-thread. */
268 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
271 int use_64bit
= is_64bit_tdesc ();
276 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
278 *addr
= (CORE_ADDR
) (uintptr_t) base
;
287 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
288 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
289 unsigned int desc
[4];
291 const int reg_thread_area
= 3; /* bits to scale down register value. */
294 collect_register_by_name (regcache
, "gs", &gs
);
296 idx
= gs
>> reg_thread_area
;
298 if (ptrace (PTRACE_GET_THREAD_AREA
,
300 (void *) (long) idx
, (unsigned long) &desc
) < 0)
311 x86_cannot_store_register (int regno
)
314 if (is_64bit_tdesc ())
318 return regno
>= I386_NUM_REGS
;
322 x86_cannot_fetch_register (int regno
)
325 if (is_64bit_tdesc ())
329 return regno
>= I386_NUM_REGS
;
333 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
338 if (register_size (regcache
->tdesc
, 0) == 8)
340 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
341 if (x86_64_regmap
[i
] != -1)
342 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
347 for (i
= 0; i
< I386_NUM_REGS
; i
++)
348 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
350 collect_register_by_name (regcache
, "orig_eax",
351 ((char *) buf
) + ORIG_EAX
* 4);
355 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
360 if (register_size (regcache
->tdesc
, 0) == 8)
362 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
363 if (x86_64_regmap
[i
] != -1)
364 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
369 for (i
= 0; i
< I386_NUM_REGS
; i
++)
370 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
372 supply_register_by_name (regcache
, "orig_eax",
373 ((char *) buf
) + ORIG_EAX
* 4);
377 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
380 i387_cache_to_fxsave (regcache
, buf
);
382 i387_cache_to_fsave (regcache
, buf
);
387 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
390 i387_fxsave_to_cache (regcache
, buf
);
392 i387_fsave_to_cache (regcache
, buf
);
399 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
401 i387_cache_to_fxsave (regcache
, buf
);
405 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
407 i387_fxsave_to_cache (regcache
, buf
);
413 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
415 i387_cache_to_xsave (regcache
, buf
);
419 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
421 i387_xsave_to_cache (regcache
, buf
);
424 /* ??? The non-biarch i386 case stores all the i387 regs twice.
425 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
426 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
427 doesn't work. IWBN to avoid the duplication in the case where it
428 does work. Maybe the arch_setup routine could check whether it works
429 and update the supported regsets accordingly. */
431 static struct regset_info x86_regsets
[] =
433 #ifdef HAVE_PTRACE_GETREGS
434 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
436 x86_fill_gregset
, x86_store_gregset
},
437 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
438 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
440 # ifdef HAVE_PTRACE_GETFPXREGS
441 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
443 x86_fill_fpxregset
, x86_store_fpxregset
},
446 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
448 x86_fill_fpregset
, x86_store_fpregset
},
449 #endif /* HAVE_PTRACE_GETREGS */
450 { 0, 0, 0, -1, -1, NULL
, NULL
}
454 x86_get_pc (struct regcache
*regcache
)
456 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
461 collect_register_by_name (regcache
, "rip", &pc
);
462 return (CORE_ADDR
) pc
;
467 collect_register_by_name (regcache
, "eip", &pc
);
468 return (CORE_ADDR
) pc
;
473 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
475 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
479 unsigned long newpc
= pc
;
480 supply_register_by_name (regcache
, "rip", &newpc
);
484 unsigned int newpc
= pc
;
485 supply_register_by_name (regcache
, "eip", &newpc
);
489 static const unsigned char x86_breakpoint
[] = { 0xCC };
490 #define x86_breakpoint_len 1
493 x86_breakpoint_at (CORE_ADDR pc
)
497 (*the_target
->read_memory
) (pc
, &c
, 1);
504 /* Support for debug registers. */
507 x86_linux_dr_get (ptid_t ptid
, int regnum
)
512 tid
= ptid_get_lwp (ptid
);
515 value
= ptrace (PTRACE_PEEKUSER
, tid
,
516 offsetof (struct user
, u_debugreg
[regnum
]), 0);
518 error ("Couldn't read debug register");
524 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
528 tid
= ptid_get_lwp (ptid
);
531 ptrace (PTRACE_POKEUSER
, tid
,
532 offsetof (struct user
, u_debugreg
[regnum
]), value
);
534 error ("Couldn't write debug register");
538 update_debug_registers_callback (struct inferior_list_entry
*entry
,
541 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
542 int pid
= *(int *) pid_p
;
544 /* Only update the threads of this process. */
545 if (pid_of (lwp
) == pid
)
547 /* The actual update is done later just before resuming the lwp,
548 we just mark that the registers need updating. */
549 lwp
->arch_private
->debug_registers_changed
= 1;
551 /* If the lwp isn't stopped, force it to momentarily pause, so
552 we can update its debug registers. */
554 linux_stop_lwp (lwp
);
560 /* Update the inferior's debug register REGNUM from STATE. */
563 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
565 /* Only update the threads of this process. */
566 int pid
= pid_of (get_thread_lwp (current_inferior
));
568 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
569 fatal ("Invalid debug register %d", regnum
);
571 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
574 /* Return the inferior's debug register REGNUM. */
577 i386_dr_low_get_addr (int regnum
)
579 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
580 ptid_t ptid
= ptid_of (lwp
);
582 /* DR6 and DR7 are retrieved with some other way. */
583 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
585 return x86_linux_dr_get (ptid
, regnum
);
588 /* Update the inferior's DR7 debug control register from STATE. */
591 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
593 /* Only update the threads of this process. */
594 int pid
= pid_of (get_thread_lwp (current_inferior
));
596 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
599 /* Return the inferior's DR7 debug control register. */
602 i386_dr_low_get_control (void)
604 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
605 ptid_t ptid
= ptid_of (lwp
);
607 return x86_linux_dr_get (ptid
, DR_CONTROL
);
610 /* Get the value of the DR6 debug status register from the inferior
611 and record it in STATE. */
614 i386_dr_low_get_status (void)
616 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
617 ptid_t ptid
= ptid_of (lwp
);
619 return x86_linux_dr_get (ptid
, DR_STATUS
);
622 /* Breakpoint/Watchpoint support. */
625 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
627 struct process_info
*proc
= current_process ();
630 case '0': /* software-breakpoint */
634 ret
= prepare_to_access_memory ();
637 ret
= set_gdb_breakpoint_at (addr
);
638 done_accessing_memory ();
641 case '1': /* hardware-breakpoint */
642 case '2': /* write watchpoint */
643 case '3': /* read watchpoint */
644 case '4': /* access watchpoint */
645 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
655 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
657 struct process_info
*proc
= current_process ();
660 case '0': /* software-breakpoint */
664 ret
= prepare_to_access_memory ();
667 ret
= delete_gdb_breakpoint_at (addr
);
668 done_accessing_memory ();
671 case '1': /* hardware-breakpoint */
672 case '2': /* write watchpoint */
673 case '3': /* read watchpoint */
674 case '4': /* access watchpoint */
675 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
684 x86_stopped_by_watchpoint (void)
686 struct process_info
*proc
= current_process ();
687 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
691 x86_stopped_data_address (void)
693 struct process_info
*proc
= current_process ();
695 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
701 /* Called when a new process is created. */
703 static struct arch_process_info
*
704 x86_linux_new_process (void)
706 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
708 i386_low_init_dregs (&info
->debug_reg_state
);
713 /* Called when a new thread is detected. */
715 static struct arch_lwp_info
*
716 x86_linux_new_thread (void)
718 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
720 info
->debug_registers_changed
= 1;
725 /* Called when resuming a thread.
726 If the debug regs have changed, update the thread's copies. */
729 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
731 ptid_t ptid
= ptid_of (lwp
);
732 int clear_status
= 0;
734 if (lwp
->arch_private
->debug_registers_changed
)
737 int pid
= ptid_get_pid (ptid
);
738 struct process_info
*proc
= find_process_pid (pid
);
739 struct i386_debug_reg_state
*state
740 = &proc
->private->arch_private
->debug_reg_state
;
742 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
743 if (state
->dr_ref_count
[i
] > 0)
745 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
747 /* If we're setting a watchpoint, any change the inferior
748 had done itself to the debug registers needs to be
749 discarded, otherwise, i386_low_stopped_data_address can
754 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
756 lwp
->arch_private
->debug_registers_changed
= 0;
759 if (clear_status
|| lwp
->stopped_by_watchpoint
)
760 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
763 /* When GDBSERVER is built as a 64-bit application on linux, the
764 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
765 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
766 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
767 conversion in-place ourselves. */
769 /* These types below (compat_*) define a siginfo type that is layout
770 compatible with the siginfo type exported by the 32-bit userspace
775 typedef int compat_int_t
;
776 typedef unsigned int compat_uptr_t
;
778 typedef int compat_time_t
;
779 typedef int compat_timer_t
;
780 typedef int compat_clock_t
;
782 struct compat_timeval
784 compat_time_t tv_sec
;
788 typedef union compat_sigval
790 compat_int_t sival_int
;
791 compat_uptr_t sival_ptr
;
794 typedef struct compat_siginfo
802 int _pad
[((128 / sizeof (int)) - 3)];
811 /* POSIX.1b timers */
816 compat_sigval_t _sigval
;
819 /* POSIX.1b signals */
824 compat_sigval_t _sigval
;
833 compat_clock_t _utime
;
834 compat_clock_t _stime
;
837 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
852 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
853 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
855 typedef struct compat_x32_siginfo
863 int _pad
[((128 / sizeof (int)) - 3)];
872 /* POSIX.1b timers */
877 compat_sigval_t _sigval
;
880 /* POSIX.1b signals */
885 compat_sigval_t _sigval
;
894 compat_x32_clock_t _utime
;
895 compat_x32_clock_t _stime
;
898 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
911 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
913 #define cpt_si_pid _sifields._kill._pid
914 #define cpt_si_uid _sifields._kill._uid
915 #define cpt_si_timerid _sifields._timer._tid
916 #define cpt_si_overrun _sifields._timer._overrun
917 #define cpt_si_status _sifields._sigchld._status
918 #define cpt_si_utime _sifields._sigchld._utime
919 #define cpt_si_stime _sifields._sigchld._stime
920 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
921 #define cpt_si_addr _sifields._sigfault._addr
922 #define cpt_si_band _sifields._sigpoll._band
923 #define cpt_si_fd _sifields._sigpoll._fd
925 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
926 In their place is si_timer1,si_timer2. */
928 #define si_timerid si_timer1
931 #define si_overrun si_timer2
935 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
937 memset (to
, 0, sizeof (*to
));
939 to
->si_signo
= from
->si_signo
;
940 to
->si_errno
= from
->si_errno
;
941 to
->si_code
= from
->si_code
;
943 if (to
->si_code
== SI_TIMER
)
945 to
->cpt_si_timerid
= from
->si_timerid
;
946 to
->cpt_si_overrun
= from
->si_overrun
;
947 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
949 else if (to
->si_code
== SI_USER
)
951 to
->cpt_si_pid
= from
->si_pid
;
952 to
->cpt_si_uid
= from
->si_uid
;
954 else if (to
->si_code
< 0)
956 to
->cpt_si_pid
= from
->si_pid
;
957 to
->cpt_si_uid
= from
->si_uid
;
958 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
962 switch (to
->si_signo
)
965 to
->cpt_si_pid
= from
->si_pid
;
966 to
->cpt_si_uid
= from
->si_uid
;
967 to
->cpt_si_status
= from
->si_status
;
968 to
->cpt_si_utime
= from
->si_utime
;
969 to
->cpt_si_stime
= from
->si_stime
;
975 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
978 to
->cpt_si_band
= from
->si_band
;
979 to
->cpt_si_fd
= from
->si_fd
;
982 to
->cpt_si_pid
= from
->si_pid
;
983 to
->cpt_si_uid
= from
->si_uid
;
984 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
991 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
993 memset (to
, 0, sizeof (*to
));
995 to
->si_signo
= from
->si_signo
;
996 to
->si_errno
= from
->si_errno
;
997 to
->si_code
= from
->si_code
;
999 if (to
->si_code
== SI_TIMER
)
1001 to
->si_timerid
= from
->cpt_si_timerid
;
1002 to
->si_overrun
= from
->cpt_si_overrun
;
1003 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1005 else if (to
->si_code
== SI_USER
)
1007 to
->si_pid
= from
->cpt_si_pid
;
1008 to
->si_uid
= from
->cpt_si_uid
;
1010 else if (to
->si_code
< 0)
1012 to
->si_pid
= from
->cpt_si_pid
;
1013 to
->si_uid
= from
->cpt_si_uid
;
1014 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1018 switch (to
->si_signo
)
1021 to
->si_pid
= from
->cpt_si_pid
;
1022 to
->si_uid
= from
->cpt_si_uid
;
1023 to
->si_status
= from
->cpt_si_status
;
1024 to
->si_utime
= from
->cpt_si_utime
;
1025 to
->si_stime
= from
->cpt_si_stime
;
1031 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1034 to
->si_band
= from
->cpt_si_band
;
1035 to
->si_fd
= from
->cpt_si_fd
;
1038 to
->si_pid
= from
->cpt_si_pid
;
1039 to
->si_uid
= from
->cpt_si_uid
;
1040 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1047 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1050 memset (to
, 0, sizeof (*to
));
1052 to
->si_signo
= from
->si_signo
;
1053 to
->si_errno
= from
->si_errno
;
1054 to
->si_code
= from
->si_code
;
1056 if (to
->si_code
== SI_TIMER
)
1058 to
->cpt_si_timerid
= from
->si_timerid
;
1059 to
->cpt_si_overrun
= from
->si_overrun
;
1060 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1062 else if (to
->si_code
== SI_USER
)
1064 to
->cpt_si_pid
= from
->si_pid
;
1065 to
->cpt_si_uid
= from
->si_uid
;
1067 else if (to
->si_code
< 0)
1069 to
->cpt_si_pid
= from
->si_pid
;
1070 to
->cpt_si_uid
= from
->si_uid
;
1071 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1075 switch (to
->si_signo
)
1078 to
->cpt_si_pid
= from
->si_pid
;
1079 to
->cpt_si_uid
= from
->si_uid
;
1080 to
->cpt_si_status
= from
->si_status
;
1081 to
->cpt_si_utime
= from
->si_utime
;
1082 to
->cpt_si_stime
= from
->si_stime
;
1088 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1091 to
->cpt_si_band
= from
->si_band
;
1092 to
->cpt_si_fd
= from
->si_fd
;
1095 to
->cpt_si_pid
= from
->si_pid
;
1096 to
->cpt_si_uid
= from
->si_uid
;
1097 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1104 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1105 compat_x32_siginfo_t
*from
)
1107 memset (to
, 0, sizeof (*to
));
1109 to
->si_signo
= from
->si_signo
;
1110 to
->si_errno
= from
->si_errno
;
1111 to
->si_code
= from
->si_code
;
1113 if (to
->si_code
== SI_TIMER
)
1115 to
->si_timerid
= from
->cpt_si_timerid
;
1116 to
->si_overrun
= from
->cpt_si_overrun
;
1117 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1119 else if (to
->si_code
== SI_USER
)
1121 to
->si_pid
= from
->cpt_si_pid
;
1122 to
->si_uid
= from
->cpt_si_uid
;
1124 else if (to
->si_code
< 0)
1126 to
->si_pid
= from
->cpt_si_pid
;
1127 to
->si_uid
= from
->cpt_si_uid
;
1128 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1132 switch (to
->si_signo
)
1135 to
->si_pid
= from
->cpt_si_pid
;
1136 to
->si_uid
= from
->cpt_si_uid
;
1137 to
->si_status
= from
->cpt_si_status
;
1138 to
->si_utime
= from
->cpt_si_utime
;
1139 to
->si_stime
= from
->cpt_si_stime
;
1145 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1148 to
->si_band
= from
->cpt_si_band
;
1149 to
->si_fd
= from
->cpt_si_fd
;
1152 to
->si_pid
= from
->cpt_si_pid
;
1153 to
->si_uid
= from
->cpt_si_uid
;
1154 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1160 #endif /* __x86_64__ */
1162 /* Convert a native/host siginfo object, into/from the siginfo in the
1163 layout of the inferiors' architecture. Returns true if any
1164 conversion was done; false otherwise. If DIRECTION is 1, then copy
1165 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1169 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1172 unsigned int machine
;
1173 int tid
= lwpid_of (get_thread_lwp (current_inferior
));
1174 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1176 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1177 if (!is_64bit_tdesc ())
1179 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1180 fatal ("unexpected difference in siginfo");
1183 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1185 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1189 /* No fixup for native x32 GDB. */
1190 else if (!is_elf64
&& sizeof (void *) == 8)
1192 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1193 fatal ("unexpected difference in siginfo");
1196 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1199 siginfo_from_compat_x32_siginfo (native
,
1200 (struct compat_x32_siginfo
*) inf
);
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "unknown, probe on first use".  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means
   "unknown, probe on first use".  */
static int have_ptrace_getregset = -1;
1246 /* Get Linux/x86 target description from running target. */
1248 static const struct target_desc
*
1249 x86_linux_read_description (void)
1251 unsigned int machine
;
1255 static uint64_t xcr0
;
1256 struct regset_info
*regset
;
1258 tid
= lwpid_of (get_thread_lwp (current_inferior
));
1260 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1262 if (sizeof (void *) == 4)
1265 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1267 else if (machine
== EM_X86_64
)
1268 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1272 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1273 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1275 elf_fpxregset_t fpxregs
;
1277 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1279 have_ptrace_getfpxregs
= 0;
1280 have_ptrace_getregset
= 0;
1281 return tdesc_i386_mmx_linux
;
1284 have_ptrace_getfpxregs
= 1;
1290 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1292 /* Don't use XML. */
1294 if (machine
== EM_X86_64
)
1295 return tdesc_amd64_linux_no_xml
;
1298 return tdesc_i386_linux_no_xml
;
1301 if (have_ptrace_getregset
== -1)
1303 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1306 iov
.iov_base
= xstateregs
;
1307 iov
.iov_len
= sizeof (xstateregs
);
1309 /* Check if PTRACE_GETREGSET works. */
1310 if (ptrace (PTRACE_GETREGSET
, tid
,
1311 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1312 have_ptrace_getregset
= 0;
1315 have_ptrace_getregset
= 1;
1317 /* Get XCR0 from XSAVE extended state. */
1318 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1319 / sizeof (uint64_t))];
1321 /* Use PTRACE_GETREGSET if it is available. */
1322 for (regset
= x86_regsets
;
1323 regset
->fill_function
!= NULL
; regset
++)
1324 if (regset
->get_request
== PTRACE_GETREGSET
)
1325 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1326 else if (regset
->type
!= GENERAL_REGS
)
1331 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1332 xcr0_features
= (have_ptrace_getregset
1333 && (xcr0
& I386_XSTATE_ALL_MASK
));
1338 if (machine
== EM_X86_64
)
1345 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1347 case I386_XSTATE_MPX_MASK
:
1348 return tdesc_amd64_mpx_linux
;
1350 case I386_XSTATE_AVX_MASK
:
1351 return tdesc_amd64_avx_linux
;
1354 return tdesc_amd64_linux
;
1358 return tdesc_amd64_linux
;
1364 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1366 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1367 case I386_XSTATE_AVX_MASK
:
1368 return tdesc_x32_avx_linux
;
1371 return tdesc_x32_linux
;
1375 return tdesc_x32_linux
;
1383 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1385 case (I386_XSTATE_MPX_MASK
):
1386 return tdesc_i386_mpx_linux
;
1388 case (I386_XSTATE_AVX_MASK
):
1389 return tdesc_i386_avx_linux
;
1392 return tdesc_i386_linux
;
1396 return tdesc_i386_linux
;
1399 gdb_assert_not_reached ("failed to return tdesc");
1402 /* Callback for find_inferior. Stops iteration when a thread with a
1403 given PID is found. */
1406 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1408 int pid
= *(int *) data
;
1410 return (ptid_get_pid (entry
->id
) == pid
);
1413 /* Callback for for_each_inferior. Calls the arch_setup routine for
1417 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1419 int pid
= ptid_get_pid (entry
->id
);
1421 /* Look up any thread of this processes. */
1423 = (struct thread_info
*) find_inferior (&all_threads
,
1424 same_process_callback
, &pid
);
1426 the_low_target
.arch_setup ();
1429 /* Update all the target description of all processes; a new GDB
1430 connected, and it may or not support xml target descriptions. */
1433 x86_linux_update_xmltarget (void)
1435 struct thread_info
*save_inferior
= current_inferior
;
1437 /* Before changing the register cache's internal layout, flush the
1438 contents of the current valid caches back to the threads, and
1439 release the current regcache objects. */
1440 regcache_release ();
1442 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1444 current_inferior
= save_inferior
;
1447 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1448 PTRACE_GETREGSET. */
1451 x86_linux_process_qsupported (const char *query
)
1453 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1454 with "i386" in qSupported query, it supports x86 XML target
1457 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1459 char *copy
= xstrdup (query
+ 13);
1462 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1464 if (strcmp (p
, "i386") == 0)
1474 x86_linux_update_xmltarget ();
1477 /* Common for x86/x86-64. */
1479 static struct regsets_info x86_regsets_info
=
1481 x86_regsets
, /* regsets */
1482 0, /* num_regsets */
1483 NULL
, /* disabled_regsets */
1487 static struct regs_info amd64_linux_regs_info
=
1489 NULL
, /* regset_bitmap */
1490 NULL
, /* usrregs_info */
1494 static struct usrregs_info i386_linux_usrregs_info
=
1500 static struct regs_info i386_linux_regs_info
=
1502 NULL
, /* regset_bitmap */
1503 &i386_linux_usrregs_info
,
1507 const struct regs_info
*
1508 x86_linux_regs_info (void)
1511 if (is_64bit_tdesc ())
1512 return &amd64_linux_regs_info
;
1515 return &i386_linux_regs_info
;
1518 /* Initialize the target description for the architecture of the
1522 x86_arch_setup (void)
1524 current_process ()->tdesc
= x86_linux_read_description ();
/* This target supports (fast) tracepoints.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1534 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1536 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), storing the decoded bytes at BUF.  Returns
   the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul leaves ENDPTR at OP when no digits were consumed --
	 that terminates the sequence.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1562 /* Build a jump pad that saves registers and calls a collection
1563 function. Writes a jump instruction to the jump pad to
1564 JJUMPAD_INSN. The caller is responsible to write it in at the
1565 tracepoint address. */
1568 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1569 CORE_ADDR collector
,
1572 CORE_ADDR
*jump_entry
,
1573 CORE_ADDR
*trampoline
,
1574 ULONGEST
*trampoline_size
,
1575 unsigned char *jjump_pad_insn
,
1576 ULONGEST
*jjump_pad_insn_size
,
1577 CORE_ADDR
*adjusted_insn_addr
,
1578 CORE_ADDR
*adjusted_insn_addr_end
,
1581 unsigned char buf
[40];
1585 CORE_ADDR buildaddr
= *jump_entry
;
1587 /* Build the jump pad. */
1589 /* First, do tracepoint data collection. Save registers. */
1591 /* Need to ensure stack pointer saved first. */
1592 buf
[i
++] = 0x54; /* push %rsp */
1593 buf
[i
++] = 0x55; /* push %rbp */
1594 buf
[i
++] = 0x57; /* push %rdi */
1595 buf
[i
++] = 0x56; /* push %rsi */
1596 buf
[i
++] = 0x52; /* push %rdx */
1597 buf
[i
++] = 0x51; /* push %rcx */
1598 buf
[i
++] = 0x53; /* push %rbx */
1599 buf
[i
++] = 0x50; /* push %rax */
1600 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1601 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1602 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1603 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1604 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1605 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1606 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1607 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1608 buf
[i
++] = 0x9c; /* pushfq */
1609 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1611 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1612 i
+= sizeof (unsigned long);
1613 buf
[i
++] = 0x57; /* push %rdi */
1614 append_insns (&buildaddr
, i
, buf
);
1616 /* Stack space for the collecting_t object. */
1618 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1619 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1620 memcpy (buf
+ i
, &tpoint
, 8);
1622 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1623 i
+= push_opcode (&buf
[i
],
1624 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1625 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1626 append_insns (&buildaddr
, i
, buf
);
1630 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1631 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1633 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1634 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1635 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1636 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1637 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1638 append_insns (&buildaddr
, i
, buf
);
1640 /* Set up the gdb_collect call. */
1641 /* At this point, (stack pointer + 0x18) is the base of our saved
1645 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1646 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1648 /* tpoint address may be 64-bit wide. */
1649 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1650 memcpy (buf
+ i
, &tpoint
, 8);
1652 append_insns (&buildaddr
, i
, buf
);
1654 /* The collector function being in the shared library, may be
1655 >31-bits away off the jump pad. */
1657 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1658 memcpy (buf
+ i
, &collector
, 8);
1660 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1661 append_insns (&buildaddr
, i
, buf
);
1663 /* Clear the spin-lock. */
1665 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1666 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1667 memcpy (buf
+ i
, &lockaddr
, 8);
1669 append_insns (&buildaddr
, i
, buf
);
1671 /* Remove stack that had been used for the collect_t object. */
1673 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1674 append_insns (&buildaddr
, i
, buf
);
1676 /* Restore register state. */
1678 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1682 buf
[i
++] = 0x9d; /* popfq */
1683 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1684 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1685 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1686 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1687 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1688 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1689 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1690 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1691 buf
[i
++] = 0x58; /* pop %rax */
1692 buf
[i
++] = 0x5b; /* pop %rbx */
1693 buf
[i
++] = 0x59; /* pop %rcx */
1694 buf
[i
++] = 0x5a; /* pop %rdx */
1695 buf
[i
++] = 0x5e; /* pop %rsi */
1696 buf
[i
++] = 0x5f; /* pop %rdi */
1697 buf
[i
++] = 0x5d; /* pop %rbp */
1698 buf
[i
++] = 0x5c; /* pop %rsp */
1699 append_insns (&buildaddr
, i
, buf
);
1701 /* Now, adjust the original instruction to execute in the jump
1703 *adjusted_insn_addr
= buildaddr
;
1704 relocate_instruction (&buildaddr
, tpaddr
);
1705 *adjusted_insn_addr_end
= buildaddr
;
1707 /* Finally, write a jump back to the program. */
1709 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1710 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1713 "E.Jump back from jump pad too far from tracepoint "
1714 "(offset 0x%" PRIx64
" > int32).", loffset
);
1718 offset
= (int) loffset
;
1719 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1720 memcpy (buf
+ 1, &offset
, 4);
1721 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1723 /* The jump pad is now built. Wire in a jump to our jump pad. This
1724 is always done last (by our caller actually), so that we can
1725 install fast tracepoints with threads running. This relies on
1726 the agent's atomic write support. */
1727 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1728 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1731 "E.Jump pad too far from tracepoint "
1732 "(offset 0x%" PRIx64
" > int32).", loffset
);
1736 offset
= (int) loffset
;
1738 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1739 memcpy (buf
+ 1, &offset
, 4);
1740 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1741 *jjump_pad_insn_size
= sizeof (jump_insn
);
1743 /* Return the end address of our pad. */
1744 *jump_entry
= buildaddr
;
1749 #endif /* __x86_64__ */
1751 /* Build a jump pad that saves registers and calls a collection
1752 function. Writes a jump instruction to the jump pad to
1753 JJUMPAD_INSN. The caller is responsible to write it in at the
1754 tracepoint address. */
1757 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1758 CORE_ADDR collector
,
1761 CORE_ADDR
*jump_entry
,
1762 CORE_ADDR
*trampoline
,
1763 ULONGEST
*trampoline_size
,
1764 unsigned char *jjump_pad_insn
,
1765 ULONGEST
*jjump_pad_insn_size
,
1766 CORE_ADDR
*adjusted_insn_addr
,
1767 CORE_ADDR
*adjusted_insn_addr_end
,
1770 unsigned char buf
[0x100];
1772 CORE_ADDR buildaddr
= *jump_entry
;
1774 /* Build the jump pad. */
1776 /* First, do tracepoint data collection. Save registers. */
1778 buf
[i
++] = 0x60; /* pushad */
1779 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1780 *((int *)(buf
+ i
)) = (int) tpaddr
;
1782 buf
[i
++] = 0x9c; /* pushf */
1783 buf
[i
++] = 0x1e; /* push %ds */
1784 buf
[i
++] = 0x06; /* push %es */
1785 buf
[i
++] = 0x0f; /* push %fs */
1787 buf
[i
++] = 0x0f; /* push %gs */
1789 buf
[i
++] = 0x16; /* push %ss */
1790 buf
[i
++] = 0x0e; /* push %cs */
1791 append_insns (&buildaddr
, i
, buf
);
1793 /* Stack space for the collecting_t object. */
1795 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1797 /* Build the object. */
1798 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1799 memcpy (buf
+ i
, &tpoint
, 4);
1801 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1803 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1804 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1805 append_insns (&buildaddr
, i
, buf
);
1807 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1808 If we cared for it, this could be using xchg alternatively. */
1811 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1812 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1814 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1816 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1817 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1818 append_insns (&buildaddr
, i
, buf
);
1821 /* Set up arguments to the gdb_collect call. */
1823 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1824 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1825 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1826 append_insns (&buildaddr
, i
, buf
);
1829 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1830 append_insns (&buildaddr
, i
, buf
);
1833 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1834 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1836 append_insns (&buildaddr
, i
, buf
);
1838 buf
[0] = 0xe8; /* call <reladdr> */
1839 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1840 memcpy (buf
+ 1, &offset
, 4);
1841 append_insns (&buildaddr
, 5, buf
);
1842 /* Clean up after the call. */
1843 buf
[0] = 0x83; /* add $0x8,%esp */
1846 append_insns (&buildaddr
, 3, buf
);
1849 /* Clear the spin-lock. This would need the LOCK prefix on older
1852 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1853 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1854 memcpy (buf
+ i
, &lockaddr
, 4);
1856 append_insns (&buildaddr
, i
, buf
);
1859 /* Remove stack that had been used for the collect_t object. */
1861 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1862 append_insns (&buildaddr
, i
, buf
);
1865 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1868 buf
[i
++] = 0x17; /* pop %ss */
1869 buf
[i
++] = 0x0f; /* pop %gs */
1871 buf
[i
++] = 0x0f; /* pop %fs */
1873 buf
[i
++] = 0x07; /* pop %es */
1874 buf
[i
++] = 0x1f; /* pop %ds */
1875 buf
[i
++] = 0x9d; /* popf */
1876 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1879 buf
[i
++] = 0x61; /* popad */
1880 append_insns (&buildaddr
, i
, buf
);
1882 /* Now, adjust the original instruction to execute in the jump
1884 *adjusted_insn_addr
= buildaddr
;
1885 relocate_instruction (&buildaddr
, tpaddr
);
1886 *adjusted_insn_addr_end
= buildaddr
;
1888 /* Write the jump back to the program. */
1889 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1890 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1891 memcpy (buf
+ 1, &offset
, 4);
1892 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1894 /* The jump pad is now built. Wire in a jump to our jump pad. This
1895 is always done last (by our caller actually), so that we can
1896 install fast tracepoints with threads running. This relies on
1897 the agent's atomic write support. */
1900 /* Create a trampoline. */
1901 *trampoline_size
= sizeof (jump_insn
);
1902 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1904 /* No trampoline space available. */
1906 "E.Cannot allocate trampoline space needed for fast "
1907 "tracepoints on 4-byte instructions.");
1911 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1912 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1913 memcpy (buf
+ 1, &offset
, 4);
1914 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1916 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1917 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1918 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1919 memcpy (buf
+ 2, &offset
, 2);
1920 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1921 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1925 /* Else use a 32-bit relative jump instruction. */
1926 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1927 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1928 memcpy (buf
+ 1, &offset
, 4);
1929 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1930 *jjump_pad_insn_size
= sizeof (jump_insn
);
1933 /* Return the end address of our pad. */
1934 *jump_entry
= buildaddr
;
1940 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1941 CORE_ADDR collector
,
1944 CORE_ADDR
*jump_entry
,
1945 CORE_ADDR
*trampoline
,
1946 ULONGEST
*trampoline_size
,
1947 unsigned char *jjump_pad_insn
,
1948 ULONGEST
*jjump_pad_insn_size
,
1949 CORE_ADDR
*adjusted_insn_addr
,
1950 CORE_ADDR
*adjusted_insn_addr_end
,
1954 if (is_64bit_tdesc ())
1955 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1956 collector
, lockaddr
,
1957 orig_size
, jump_entry
,
1958 trampoline
, trampoline_size
,
1960 jjump_pad_insn_size
,
1962 adjusted_insn_addr_end
,
1966 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1967 collector
, lockaddr
,
1968 orig_size
, jump_entry
,
1969 trampoline
, trampoline_size
,
1971 jjump_pad_insn_size
,
1973 adjusted_insn_addr_end
,
1977 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1981 x86_get_min_fast_tracepoint_insn_len (void)
1983 static int warned_about_fast_tracepoints
= 0;
1986 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1987 used for fast tracepoints. */
1988 if (is_64bit_tdesc ())
1992 if (agent_loaded_p ())
1994 char errbuf
[IPA_BUFSIZ
];
1998 /* On x86, if trampolines are available, then 4-byte jump instructions
1999 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2000 with a 4-byte offset are used instead. */
2001 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2005 /* GDB has no channel to explain to user why a shorter fast
2006 tracepoint is not possible, but at least make GDBserver
2007 mention that something has gone awry. */
2008 if (!warned_about_fast_tracepoints
)
2010 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2011 warned_about_fast_tracepoints
= 1;
2018 /* Indicate that the minimum length is currently unknown since the IPA
2019 has not loaded yet. */
2025 add_insns (unsigned char *start
, int len
)
2027 CORE_ADDR buildaddr
= current_insn_ptr
;
2030 debug_printf ("Adding %d bytes of insn at %s\n",
2031 len
, paddress (buildaddr
));
2033 append_insns (&buildaddr
, len
, start
);
2034 current_insn_ptr
= buildaddr
;
2037 /* Our general strategy for emitting code is to avoid specifying raw
2038 bytes whenever possible, and instead copy a block of inline asm
2039 that is embedded in the function. This is a little messy, because
2040 we need to keep the compiler from discarding what looks like dead
2041 code, plus suppress various warnings. */
2043 #define EMIT_ASM(NAME, INSNS) \
2046 extern unsigned char start_ ## NAME, end_ ## NAME; \
2047 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2048 __asm__ ("jmp end_" #NAME "\n" \
2049 "\t" "start_" #NAME ":" \
2051 "\t" "end_" #NAME ":"); \
2056 #define EMIT_ASM32(NAME,INSNS) \
2059 extern unsigned char start_ ## NAME, end_ ## NAME; \
2060 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2061 __asm__ (".code32\n" \
2062 "\t" "jmp end_" #NAME "\n" \
2063 "\t" "start_" #NAME ":\n" \
2065 "\t" "end_" #NAME ":\n" \
2071 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2078 amd64_emit_prologue (void)
2080 EMIT_ASM (amd64_prologue
,
2082 "movq %rsp,%rbp\n\t"
2083 "sub $0x20,%rsp\n\t"
2084 "movq %rdi,-8(%rbp)\n\t"
2085 "movq %rsi,-16(%rbp)");
2090 amd64_emit_epilogue (void)
2092 EMIT_ASM (amd64_epilogue
,
2093 "movq -16(%rbp),%rdi\n\t"
2094 "movq %rax,(%rdi)\n\t"
2101 amd64_emit_add (void)
2103 EMIT_ASM (amd64_add
,
2104 "add (%rsp),%rax\n\t"
2105 "lea 0x8(%rsp),%rsp");
2109 amd64_emit_sub (void)
2111 EMIT_ASM (amd64_sub
,
2112 "sub %rax,(%rsp)\n\t"
2117 amd64_emit_mul (void)
2123 amd64_emit_lsh (void)
2129 amd64_emit_rsh_signed (void)
2135 amd64_emit_rsh_unsigned (void)
2141 amd64_emit_ext (int arg
)
2146 EMIT_ASM (amd64_ext_8
,
2152 EMIT_ASM (amd64_ext_16
,
2157 EMIT_ASM (amd64_ext_32
,
2166 amd64_emit_log_not (void)
2168 EMIT_ASM (amd64_log_not
,
2169 "test %rax,%rax\n\t"
2175 amd64_emit_bit_and (void)
2177 EMIT_ASM (amd64_and
,
2178 "and (%rsp),%rax\n\t"
2179 "lea 0x8(%rsp),%rsp");
2183 amd64_emit_bit_or (void)
2186 "or (%rsp),%rax\n\t"
2187 "lea 0x8(%rsp),%rsp");
2191 amd64_emit_bit_xor (void)
2193 EMIT_ASM (amd64_xor
,
2194 "xor (%rsp),%rax\n\t"
2195 "lea 0x8(%rsp),%rsp");
2199 amd64_emit_bit_not (void)
2201 EMIT_ASM (amd64_bit_not
,
2202 "xorq $0xffffffffffffffff,%rax");
2206 amd64_emit_equal (void)
2208 EMIT_ASM (amd64_equal
,
2209 "cmp %rax,(%rsp)\n\t"
2210 "je .Lamd64_equal_true\n\t"
2212 "jmp .Lamd64_equal_end\n\t"
2213 ".Lamd64_equal_true:\n\t"
2215 ".Lamd64_equal_end:\n\t"
2216 "lea 0x8(%rsp),%rsp");
2220 amd64_emit_less_signed (void)
2222 EMIT_ASM (amd64_less_signed
,
2223 "cmp %rax,(%rsp)\n\t"
2224 "jl .Lamd64_less_signed_true\n\t"
2226 "jmp .Lamd64_less_signed_end\n\t"
2227 ".Lamd64_less_signed_true:\n\t"
2229 ".Lamd64_less_signed_end:\n\t"
2230 "lea 0x8(%rsp),%rsp");
2234 amd64_emit_less_unsigned (void)
2236 EMIT_ASM (amd64_less_unsigned
,
2237 "cmp %rax,(%rsp)\n\t"
2238 "jb .Lamd64_less_unsigned_true\n\t"
2240 "jmp .Lamd64_less_unsigned_end\n\t"
2241 ".Lamd64_less_unsigned_true:\n\t"
2243 ".Lamd64_less_unsigned_end:\n\t"
2244 "lea 0x8(%rsp),%rsp");
2248 amd64_emit_ref (int size
)
2253 EMIT_ASM (amd64_ref1
,
2257 EMIT_ASM (amd64_ref2
,
2261 EMIT_ASM (amd64_ref4
,
2262 "movl (%rax),%eax");
2265 EMIT_ASM (amd64_ref8
,
2266 "movq (%rax),%rax");
2272 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2274 EMIT_ASM (amd64_if_goto
,
2278 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2286 amd64_emit_goto (int *offset_p
, int *size_p
)
2288 EMIT_ASM (amd64_goto
,
2289 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2297 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2299 int diff
= (to
- (from
+ size
));
2300 unsigned char buf
[sizeof (int)];
2308 memcpy (buf
, &diff
, sizeof (int));
2309 write_inferior_memory (from
, buf
, sizeof (int));
2313 amd64_emit_const (LONGEST num
)
2315 unsigned char buf
[16];
2317 CORE_ADDR buildaddr
= current_insn_ptr
;
2320 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2321 memcpy (&buf
[i
], &num
, sizeof (num
));
2323 append_insns (&buildaddr
, i
, buf
);
2324 current_insn_ptr
= buildaddr
;
2328 amd64_emit_call (CORE_ADDR fn
)
2330 unsigned char buf
[16];
2332 CORE_ADDR buildaddr
;
2335 /* The destination function being in the shared library, may be
2336 >31-bits away off the compiled code pad. */
2338 buildaddr
= current_insn_ptr
;
2340 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2344 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2346 /* Offset is too large for a call. Use callq, but that requires
2347 a register, so avoid it if possible. Use r10, since it is
2348 call-clobbered, we don't have to push/pop it. */
2349 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2351 memcpy (buf
+ i
, &fn
, 8);
2353 buf
[i
++] = 0xff; /* callq *%r10 */
2358 int offset32
= offset64
; /* we know we can't overflow here. */
2359 memcpy (buf
+ i
, &offset32
, 4);
2363 append_insns (&buildaddr
, i
, buf
);
2364 current_insn_ptr
= buildaddr
;
2368 amd64_emit_reg (int reg
)
2370 unsigned char buf
[16];
2372 CORE_ADDR buildaddr
;
2374 /* Assume raw_regs is still in %rdi. */
2375 buildaddr
= current_insn_ptr
;
2377 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2378 memcpy (&buf
[i
], ®
, sizeof (reg
));
2380 append_insns (&buildaddr
, i
, buf
);
2381 current_insn_ptr
= buildaddr
;
2382 amd64_emit_call (get_raw_reg_func_addr ());
2386 amd64_emit_pop (void)
2388 EMIT_ASM (amd64_pop
,
2393 amd64_emit_stack_flush (void)
2395 EMIT_ASM (amd64_stack_flush
,
2400 amd64_emit_zero_ext (int arg
)
2405 EMIT_ASM (amd64_zero_ext_8
,
2409 EMIT_ASM (amd64_zero_ext_16
,
2410 "and $0xffff,%rax");
2413 EMIT_ASM (amd64_zero_ext_32
,
2414 "mov $0xffffffff,%rcx\n\t"
2423 amd64_emit_swap (void)
2425 EMIT_ASM (amd64_swap
,
2432 amd64_emit_stack_adjust (int n
)
2434 unsigned char buf
[16];
2436 CORE_ADDR buildaddr
= current_insn_ptr
;
2439 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2443 /* This only handles adjustments up to 16, but we don't expect any more. */
2445 append_insns (&buildaddr
, i
, buf
);
2446 current_insn_ptr
= buildaddr
;
2449 /* FN's prototype is `LONGEST(*fn)(int)'. */
2452 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2454 unsigned char buf
[16];
2456 CORE_ADDR buildaddr
;
2458 buildaddr
= current_insn_ptr
;
2460 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2461 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2463 append_insns (&buildaddr
, i
, buf
);
2464 current_insn_ptr
= buildaddr
;
2465 amd64_emit_call (fn
);
2468 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2471 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2473 unsigned char buf
[16];
2475 CORE_ADDR buildaddr
;
2477 buildaddr
= current_insn_ptr
;
2479 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2480 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2482 append_insns (&buildaddr
, i
, buf
);
2483 current_insn_ptr
= buildaddr
;
2484 EMIT_ASM (amd64_void_call_2_a
,
2485 /* Save away a copy of the stack top. */
2487 /* Also pass top as the second argument. */
2489 amd64_emit_call (fn
);
2490 EMIT_ASM (amd64_void_call_2_b
,
2491 /* Restore the stack top, %rax may have been trashed. */
2496 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2499 "cmp %rax,(%rsp)\n\t"
2500 "jne .Lamd64_eq_fallthru\n\t"
2501 "lea 0x8(%rsp),%rsp\n\t"
2503 /* jmp, but don't trust the assembler to choose the right jump */
2504 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2505 ".Lamd64_eq_fallthru:\n\t"
2506 "lea 0x8(%rsp),%rsp\n\t"
2516 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2519 "cmp %rax,(%rsp)\n\t"
2520 "je .Lamd64_ne_fallthru\n\t"
2521 "lea 0x8(%rsp),%rsp\n\t"
2523 /* jmp, but don't trust the assembler to choose the right jump */
2524 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2525 ".Lamd64_ne_fallthru:\n\t"
2526 "lea 0x8(%rsp),%rsp\n\t"
2536 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2539 "cmp %rax,(%rsp)\n\t"
2540 "jnl .Lamd64_lt_fallthru\n\t"
2541 "lea 0x8(%rsp),%rsp\n\t"
2543 /* jmp, but don't trust the assembler to choose the right jump */
2544 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2545 ".Lamd64_lt_fallthru:\n\t"
2546 "lea 0x8(%rsp),%rsp\n\t"
2556 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2559 "cmp %rax,(%rsp)\n\t"
2560 "jnle .Lamd64_le_fallthru\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2563 /* jmp, but don't trust the assembler to choose the right jump */
2564 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2565 ".Lamd64_le_fallthru:\n\t"
2566 "lea 0x8(%rsp),%rsp\n\t"
2576 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2579 "cmp %rax,(%rsp)\n\t"
2580 "jng .Lamd64_gt_fallthru\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2583 /* jmp, but don't trust the assembler to choose the right jump */
2584 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2585 ".Lamd64_gt_fallthru:\n\t"
2586 "lea 0x8(%rsp),%rsp\n\t"
2596 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2599 "cmp %rax,(%rsp)\n\t"
2600 "jnge .Lamd64_ge_fallthru\n\t"
2601 ".Lamd64_ge_jump:\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2604 /* jmp, but don't trust the assembler to choose the right jump */
2605 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2606 ".Lamd64_ge_fallthru:\n\t"
2607 "lea 0x8(%rsp),%rsp\n\t"
2616 struct emit_ops amd64_emit_ops
=
2618 amd64_emit_prologue
,
2619 amd64_emit_epilogue
,
2624 amd64_emit_rsh_signed
,
2625 amd64_emit_rsh_unsigned
,
2633 amd64_emit_less_signed
,
2634 amd64_emit_less_unsigned
,
2638 amd64_write_goto_address
,
2643 amd64_emit_stack_flush
,
2644 amd64_emit_zero_ext
,
2646 amd64_emit_stack_adjust
,
2647 amd64_emit_int_call_1
,
2648 amd64_emit_void_call_2
,
2657 #endif /* __x86_64__ */
2660 i386_emit_prologue (void)
2662 EMIT_ASM32 (i386_prologue
,
2666 /* At this point, the raw regs base address is at 8(%ebp), and the
2667 value pointer is at 12(%ebp). */
2671 i386_emit_epilogue (void)
2673 EMIT_ASM32 (i386_epilogue
,
2674 "mov 12(%ebp),%ecx\n\t"
2675 "mov %eax,(%ecx)\n\t"
2676 "mov %ebx,0x4(%ecx)\n\t"
2684 i386_emit_add (void)
2686 EMIT_ASM32 (i386_add
,
2687 "add (%esp),%eax\n\t"
2688 "adc 0x4(%esp),%ebx\n\t"
2689 "lea 0x8(%esp),%esp");
2693 i386_emit_sub (void)
2695 EMIT_ASM32 (i386_sub
,
2696 "subl %eax,(%esp)\n\t"
2697 "sbbl %ebx,4(%esp)\n\t"
2703 i386_emit_mul (void)
2709 i386_emit_lsh (void)
2715 i386_emit_rsh_signed (void)
2721 i386_emit_rsh_unsigned (void)
2727 i386_emit_ext (int arg
)
2732 EMIT_ASM32 (i386_ext_8
,
2735 "movl %eax,%ebx\n\t"
2739 EMIT_ASM32 (i386_ext_16
,
2741 "movl %eax,%ebx\n\t"
2745 EMIT_ASM32 (i386_ext_32
,
2746 "movl %eax,%ebx\n\t"
2755 i386_emit_log_not (void)
2757 EMIT_ASM32 (i386_log_not
,
2759 "test %eax,%eax\n\t"
2766 i386_emit_bit_and (void)
2768 EMIT_ASM32 (i386_and
,
2769 "and (%esp),%eax\n\t"
2770 "and 0x4(%esp),%ebx\n\t"
2771 "lea 0x8(%esp),%esp");
2775 i386_emit_bit_or (void)
2777 EMIT_ASM32 (i386_or
,
2778 "or (%esp),%eax\n\t"
2779 "or 0x4(%esp),%ebx\n\t"
2780 "lea 0x8(%esp),%esp");
2784 i386_emit_bit_xor (void)
2786 EMIT_ASM32 (i386_xor
,
2787 "xor (%esp),%eax\n\t"
2788 "xor 0x4(%esp),%ebx\n\t"
2789 "lea 0x8(%esp),%esp");
2793 i386_emit_bit_not (void)
2795 EMIT_ASM32 (i386_bit_not
,
2796 "xor $0xffffffff,%eax\n\t"
2797 "xor $0xffffffff,%ebx\n\t");
2801 i386_emit_equal (void)
2803 EMIT_ASM32 (i386_equal
,
2804 "cmpl %ebx,4(%esp)\n\t"
2805 "jne .Li386_equal_false\n\t"
2806 "cmpl %eax,(%esp)\n\t"
2807 "je .Li386_equal_true\n\t"
2808 ".Li386_equal_false:\n\t"
2810 "jmp .Li386_equal_end\n\t"
2811 ".Li386_equal_true:\n\t"
2813 ".Li386_equal_end:\n\t"
2815 "lea 0x8(%esp),%esp");
2819 i386_emit_less_signed (void)
2821 EMIT_ASM32 (i386_less_signed
,
2822 "cmpl %ebx,4(%esp)\n\t"
2823 "jl .Li386_less_signed_true\n\t"
2824 "jne .Li386_less_signed_false\n\t"
2825 "cmpl %eax,(%esp)\n\t"
2826 "jl .Li386_less_signed_true\n\t"
2827 ".Li386_less_signed_false:\n\t"
2829 "jmp .Li386_less_signed_end\n\t"
2830 ".Li386_less_signed_true:\n\t"
2832 ".Li386_less_signed_end:\n\t"
2834 "lea 0x8(%esp),%esp");
2838 i386_emit_less_unsigned (void)
2840 EMIT_ASM32 (i386_less_unsigned
,
2841 "cmpl %ebx,4(%esp)\n\t"
2842 "jb .Li386_less_unsigned_true\n\t"
2843 "jne .Li386_less_unsigned_false\n\t"
2844 "cmpl %eax,(%esp)\n\t"
2845 "jb .Li386_less_unsigned_true\n\t"
2846 ".Li386_less_unsigned_false:\n\t"
2848 "jmp .Li386_less_unsigned_end\n\t"
2849 ".Li386_less_unsigned_true:\n\t"
2851 ".Li386_less_unsigned_end:\n\t"
2853 "lea 0x8(%esp),%esp");
2857 i386_emit_ref (int size
)
2862 EMIT_ASM32 (i386_ref1
,
2866 EMIT_ASM32 (i386_ref2
,
2870 EMIT_ASM32 (i386_ref4
,
2871 "movl (%eax),%eax");
2874 EMIT_ASM32 (i386_ref8
,
2875 "movl 4(%eax),%ebx\n\t"
2876 "movl (%eax),%eax");
2882 i386_emit_if_goto (int *offset_p
, int *size_p
)
2884 EMIT_ASM32 (i386_if_goto
,
2890 /* Don't trust the assembler to choose the right jump */
2891 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2894 *offset_p
= 11; /* be sure that this matches the sequence above */
2900 i386_emit_goto (int *offset_p
, int *size_p
)
2902 EMIT_ASM32 (i386_goto
,
2903 /* Don't trust the assembler to choose the right jump */
2904 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2912 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2914 int diff
= (to
- (from
+ size
));
2915 unsigned char buf
[sizeof (int)];
2917 /* We're only doing 4-byte sizes at the moment. */
2924 memcpy (buf
, &diff
, sizeof (int));
2925 write_inferior_memory (from
, buf
, sizeof (int));
2929 i386_emit_const (LONGEST num
)
2931 unsigned char buf
[16];
2933 CORE_ADDR buildaddr
= current_insn_ptr
;
2936 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2937 lo
= num
& 0xffffffff;
2938 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2940 hi
= ((num
>> 32) & 0xffffffff);
2943 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2944 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2949 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2951 append_insns (&buildaddr
, i
, buf
);
2952 current_insn_ptr
= buildaddr
;
2956 i386_emit_call (CORE_ADDR fn
)
2958 unsigned char buf
[16];
2960 CORE_ADDR buildaddr
;
2962 buildaddr
= current_insn_ptr
;
2964 buf
[i
++] = 0xe8; /* call <reladdr> */
2965 offset
= ((int) fn
) - (buildaddr
+ 5);
2966 memcpy (buf
+ 1, &offset
, 4);
2967 append_insns (&buildaddr
, 5, buf
);
2968 current_insn_ptr
= buildaddr
;
2972 i386_emit_reg (int reg
)
2974 unsigned char buf
[16];
2976 CORE_ADDR buildaddr
;
2978 EMIT_ASM32 (i386_reg_a
,
2980 buildaddr
= current_insn_ptr
;
2982 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2983 memcpy (&buf
[i
], ®
, sizeof (reg
));
2985 append_insns (&buildaddr
, i
, buf
);
2986 current_insn_ptr
= buildaddr
;
2987 EMIT_ASM32 (i386_reg_b
,
2988 "mov %eax,4(%esp)\n\t"
2989 "mov 8(%ebp),%eax\n\t"
2991 i386_emit_call (get_raw_reg_func_addr ());
2992 EMIT_ASM32 (i386_reg_c
,
2994 "lea 0x8(%esp),%esp");
2998 i386_emit_pop (void)
3000 EMIT_ASM32 (i386_pop
,
3006 i386_emit_stack_flush (void)
3008 EMIT_ASM32 (i386_stack_flush
,
3014 i386_emit_zero_ext (int arg
)
3019 EMIT_ASM32 (i386_zero_ext_8
,
3020 "and $0xff,%eax\n\t"
3024 EMIT_ASM32 (i386_zero_ext_16
,
3025 "and $0xffff,%eax\n\t"
3029 EMIT_ASM32 (i386_zero_ext_32
,
3038 i386_emit_swap (void)
3040 EMIT_ASM32 (i386_swap
,
3050 i386_emit_stack_adjust (int n
)
3052 unsigned char buf
[16];
3054 CORE_ADDR buildaddr
= current_insn_ptr
;
3057 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3061 append_insns (&buildaddr
, i
, buf
);
3062 current_insn_ptr
= buildaddr
;
3065 /* FN's prototype is `LONGEST(*fn)(int)'. */
3068 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3070 unsigned char buf
[16];
3072 CORE_ADDR buildaddr
;
3074 EMIT_ASM32 (i386_int_call_1_a
,
3075 /* Reserve a bit of stack space. */
3077 /* Put the one argument on the stack. */
3078 buildaddr
= current_insn_ptr
;
3080 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3083 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3085 append_insns (&buildaddr
, i
, buf
);
3086 current_insn_ptr
= buildaddr
;
3087 i386_emit_call (fn
);
3088 EMIT_ASM32 (i386_int_call_1_c
,
3090 "lea 0x8(%esp),%esp");
3093 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3096 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3098 unsigned char buf
[16];
3100 CORE_ADDR buildaddr
;
3102 EMIT_ASM32 (i386_void_call_2_a
,
3103 /* Preserve %eax only; we don't have to worry about %ebx. */
3105 /* Reserve a bit of stack space for arguments. */
3106 "sub $0x10,%esp\n\t"
3107 /* Copy "top" to the second argument position. (Note that
3108 we can't assume function won't scribble on its
3109 arguments, so don't try to restore from this.) */
3110 "mov %eax,4(%esp)\n\t"
3111 "mov %ebx,8(%esp)");
3112 /* Put the first argument on the stack. */
3113 buildaddr
= current_insn_ptr
;
3115 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3118 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3120 append_insns (&buildaddr
, i
, buf
);
3121 current_insn_ptr
= buildaddr
;
3122 i386_emit_call (fn
);
3123 EMIT_ASM32 (i386_void_call_2_b
,
3124 "lea 0x10(%esp),%esp\n\t"
3125 /* Restore original stack top. */
3131 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3134 /* Check low half first, more likely to be decider */
3135 "cmpl %eax,(%esp)\n\t"
3136 "jne .Leq_fallthru\n\t"
3137 "cmpl %ebx,4(%esp)\n\t"
3138 "jne .Leq_fallthru\n\t"
3139 "lea 0x8(%esp),%esp\n\t"
3142 /* jmp, but don't trust the assembler to choose the right jump */
3143 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3144 ".Leq_fallthru:\n\t"
3145 "lea 0x8(%esp),%esp\n\t"
3156 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3159 /* Check low half first, more likely to be decider */
3160 "cmpl %eax,(%esp)\n\t"
3162 "cmpl %ebx,4(%esp)\n\t"
3163 "je .Lne_fallthru\n\t"
3165 "lea 0x8(%esp),%esp\n\t"
3168 /* jmp, but don't trust the assembler to choose the right jump */
3169 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3170 ".Lne_fallthru:\n\t"
3171 "lea 0x8(%esp),%esp\n\t"
3182 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3185 "cmpl %ebx,4(%esp)\n\t"
3187 "jne .Llt_fallthru\n\t"
3188 "cmpl %eax,(%esp)\n\t"
3189 "jnl .Llt_fallthru\n\t"
3191 "lea 0x8(%esp),%esp\n\t"
3194 /* jmp, but don't trust the assembler to choose the right jump */
3195 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3196 ".Llt_fallthru:\n\t"
3197 "lea 0x8(%esp),%esp\n\t"
3208 i386_emit_le_goto (int *offset_p
, int *size_p
)
3211 "cmpl %ebx,4(%esp)\n\t"
3213 "jne .Lle_fallthru\n\t"
3214 "cmpl %eax,(%esp)\n\t"
3215 "jnle .Lle_fallthru\n\t"
3217 "lea 0x8(%esp),%esp\n\t"
3220 /* jmp, but don't trust the assembler to choose the right jump */
3221 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3222 ".Lle_fallthru:\n\t"
3223 "lea 0x8(%esp),%esp\n\t"
3234 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3237 "cmpl %ebx,4(%esp)\n\t"
3239 "jne .Lgt_fallthru\n\t"
3240 "cmpl %eax,(%esp)\n\t"
3241 "jng .Lgt_fallthru\n\t"
3243 "lea 0x8(%esp),%esp\n\t"
3246 /* jmp, but don't trust the assembler to choose the right jump */
3247 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3248 ".Lgt_fallthru:\n\t"
3249 "lea 0x8(%esp),%esp\n\t"
3260 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3263 "cmpl %ebx,4(%esp)\n\t"
3265 "jne .Lge_fallthru\n\t"
3266 "cmpl %eax,(%esp)\n\t"
3267 "jnge .Lge_fallthru\n\t"
3269 "lea 0x8(%esp),%esp\n\t"
3272 /* jmp, but don't trust the assembler to choose the right jump */
3273 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3274 ".Lge_fallthru:\n\t"
3275 "lea 0x8(%esp),%esp\n\t"
3285 struct emit_ops i386_emit_ops
=
3293 i386_emit_rsh_signed
,
3294 i386_emit_rsh_unsigned
,
3302 i386_emit_less_signed
,
3303 i386_emit_less_unsigned
,
3307 i386_write_goto_address
,
3312 i386_emit_stack_flush
,
3315 i386_emit_stack_adjust
,
3316 i386_emit_int_call_1
,
3317 i386_emit_void_call_2
,
3327 static struct emit_ops
*
3331 if (is_64bit_tdesc ())
3332 return &amd64_emit_ops
;
3335 return &i386_emit_ops
;
3339 x86_supports_range_stepping (void)
3344 /* This is initialized assuming an amd64 target.
3345 x86_arch_setup will correct it for i386 or amd64 targets. */
3347 struct linux_target_ops the_low_target
=
3350 x86_linux_regs_info
,
3351 x86_cannot_fetch_register
,
3352 x86_cannot_store_register
,
3353 NULL
, /* fetch_register */
3363 x86_stopped_by_watchpoint
,
3364 x86_stopped_data_address
,
3365 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3366 native i386 case (no registers smaller than an xfer unit), and are not
3367 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3370 /* need to fix up i386 siginfo if host is amd64 */
3372 x86_linux_new_process
,
3373 x86_linux_new_thread
,
3374 x86_linux_prepare_to_resume
,
3375 x86_linux_process_qsupported
,
3376 x86_supports_tracepoints
,
3377 x86_get_thread_area
,
3378 x86_install_fast_tracepoint_jump_pad
,
3380 x86_get_min_fast_tracepoint_insn_len
,
3381 x86_supports_range_stepping
,
3385 initialize_low_arch (void)
3387 /* Initialize the Linux target descriptions. */
3389 init_registers_amd64_linux ();
3390 init_registers_amd64_avx_linux ();
3391 init_registers_amd64_mpx_linux ();
3393 init_registers_x32_linux ();
3394 init_registers_x32_avx_linux ();
3396 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3397 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3398 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3400 init_registers_i386_linux ();
3401 init_registers_i386_mmx_linux ();
3402 init_registers_i386_avx_linux ();
3403 init_registers_i386_mpx_linux ();
3405 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3406 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3407 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3409 initialize_regsets_info (&x86_regsets_info
);